/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_internal/cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
	     void *src_domain_ctx, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;
static int g_accel_io_device;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_accel_io_device);
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
ut_accel_ch_create_cb(void *io_device, void *ctx)
{
	return 0;
}

static void
ut_accel_ch_destroy_cb(void *io_device, void *ctx)
{
}
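/*
 * Suite-level setup/teardown: register and unregister the fake accel io_device
 * so that spdk_accel_get_io_channel() above has a channel to hand out.
 */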
static int
ut_bdev_setup(void)
{
	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
				ut_accel_ch_destroy_cb, 0, NULL);
	return 0;
}

static int
ut_bdev_teardown(void)
{
	spdk_io_device_unregister(&g_accel_io_device, NULL);

	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t src_offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_io {
	TAILQ_ENTRY(bdev_ut_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, bdev_ut_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static struct ut_expected_io *
ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->src_offset = src_offset;
	expected_io->length = length;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	struct bdev_ut_io *bio;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
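		/* If the bdev has separate metadata, mirror it into the caller's md buffer too. */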
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio, &ch->outstanding_io, link) {
				bio_to_abort = spdk_bdev_io_from_ctx(bio);
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio, link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct bdev_ut_io *)bdev_io->driver_ctx, link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}
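	/* Only check the metadata pointer when the expectation supplies one. */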
	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
	if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
		CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
	}

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.f.has_bounce_buf == false) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = bdev_io->internal.bounce_buf.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct bdev_ut_io *bio;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bio = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bio, link);
		bdev_io = spdk_bdev_io_from_ctx(bio);
		ch->outstanding_io_count--;
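		/* Complete with the status a test configured via g_io_exp_status. */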
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA] = true,
	[SPDK_BDEV_IO_TYPE_COPY] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

static int
vbdev_ut_get_ctx_size(void)
{
	return sizeof(struct bdev_ut_io);
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine_config,
	.examine_disk = vbdev_ut_examine_disk,
	.get_ctx_size = vbdev_ut_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)
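/*
 * Optional per-bdev context that lets a test observe the examine callbacks
 * issued for that bdev and hook in its own handlers.
 */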
struct ut_examine_ctx {
	void (*examine_config)(struct spdk_bdev *bdev);
	void (*examine_disk)(struct spdk_bdev *bdev);
	uint32_t examine_config_count;
	uint32_t examine_disk_count;
};

static void
vbdev_ut_examine_config(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_config_count++;
		if (ctx->examine_config != NULL) {
			ctx->examine_config(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static void
vbdev_ut_examine_disk(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_disk_count++;
		if (ctx->examine_disk != NULL) {
			ctx->examine_disk(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}

static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static struct spdk_bdev *
allocate_bdev_ctx(char *name, void *ctx)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->ctxt = ctx;
	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	spdk_uuid_generate(&bdev->uuid);

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	return allocate_bdev_ctx(name, NULL);
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}
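/*
 * Open callbacks used by the tests below: cb1/cb2 record the event type and
 * close their descriptor on REMOVE; cb3/cb4 only record the event type.
 */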
static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, SPDK_BDEV_RESET_STAT_NONE, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	ut_init_bdev(NULL);

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.  This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *       +----------+
	 *       |          |
	 *     bdev4      bdev5   bdev6   bdev7
	 *       |          |       |       |
	 *   +---+---+    +---+     +   +---+---+
	 *   |       |     \  |    /         \
	 * bdev0   bdev1    bdev2           bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");
	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);

	ut_fini_bdev();
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	ut_init_bdev(NULL);

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);
	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev_desc desc;
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	desc.bdev = &bdev;
	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&desc, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&desc, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&desc, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&desc, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&desc, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&desc, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("num_blocks");

	spdk_bdev_notify_blockcnt_change(bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0);

	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_spin_init(&bdev.internal.spinlock);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);
	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	spdk_spin_destroy(&bdev.internal.spinlock);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	ut_init_bdev(NULL);

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying to add an alias identical to the name.
	 * The alias is identical to the name, so it can not be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add an empty alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying to add the same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias; this should fail since the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);

	ut_fini_bdev();
}
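/*
 * Generic I/O completion callback.  For a ZCOPY start the bdev_io must stay
 * alive until the matching end, so it is stashed instead of freed.
 */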
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
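	/* This channel draws from the 4-entry bdev_io pool configured above. */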
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and max_size set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;
	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross and exceed max_size or max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceed max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceed max_sizes */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;
	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case, the length of
	 * the rest of the iovec array within an I/O boundary is a multiple of the block size.
	 */
	/* Fill iovec array for exactly one boundary. The iovec cnt for this boundary
	 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1617 */ 1618 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1619 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1620 iov[i].iov_len = 512; 1621 } 1622 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1623 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1624 1625 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1626 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1627 1628 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1629 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1630 1631 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1632 g_io_done = false; 1633 g_io_status = 0; 1634 /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */ 1635 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1636 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1637 expected_io->md_buf = md_buf; 1638 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1639 ut_expected_io_set_iov(expected_io, i, 1640 (void *)((i + 1) * 0x10000), 512); 1641 } 1642 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1643 /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */ 1644 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1645 1, 2); 1646 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1647 ut_expected_io_set_iov(expected_io, 0, 1648 (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1649 ut_expected_io_set_iov(expected_io, 1, 1650 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1651 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1652 /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */ 1653 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1654 1, 1); 1655 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1656 ut_expected_io_set_iov(expected_io, 0, 1657 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1658 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1659 1660 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1661 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1662 CU_ASSERT(rc == 0); 1663 CU_ASSERT(g_io_done == false); 1664 1665 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1666 stub_complete_io(1); 1667 CU_ASSERT(g_io_done == false); 1668 1669 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1670 stub_complete_io(2); 1671 CU_ASSERT(g_io_done == true); 1672 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1673 1674 /* Test multi vector command that needs to be split due to the IO boundary and 1675 * the capacity of child iovs. Especially test the case when the command is 1676 * split due to the capacity of child iovs, the tail address is not aligned with 1677 * block size and is rewinded to the aligned address. 1678 * 1679 * The iovecs used in read request is complex but is based on the data 1680 * collected in the real issue. We change the base addresses but keep the lengths 1681 * not to loose the credibility of the test. 
1682 */ 1683 bdev->optimal_io_boundary = 128; 1684 g_io_done = false; 1685 g_io_status = 0; 1686 1687 for (i = 0; i < 31; i++) { 1688 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1689 iov[i].iov_len = 1024; 1690 } 1691 iov[31].iov_base = (void *)0xFEED1F00000; 1692 iov[31].iov_len = 32768; 1693 iov[32].iov_base = (void *)0xFEED2000000; 1694 iov[32].iov_len = 160; 1695 iov[33].iov_base = (void *)0xFEED2100000; 1696 iov[33].iov_len = 4096; 1697 iov[34].iov_base = (void *)0xFEED2200000; 1698 iov[34].iov_len = 4096; 1699 iov[35].iov_base = (void *)0xFEED2300000; 1700 iov[35].iov_len = 4096; 1701 iov[36].iov_base = (void *)0xFEED2400000; 1702 iov[36].iov_len = 4096; 1703 iov[37].iov_base = (void *)0xFEED2500000; 1704 iov[37].iov_len = 4096; 1705 iov[38].iov_base = (void *)0xFEED2600000; 1706 iov[38].iov_len = 4096; 1707 iov[39].iov_base = (void *)0xFEED2700000; 1708 iov[39].iov_len = 4096; 1709 iov[40].iov_base = (void *)0xFEED2800000; 1710 iov[40].iov_len = 4096; 1711 iov[41].iov_base = (void *)0xFEED2900000; 1712 iov[41].iov_len = 4096; 1713 iov[42].iov_base = (void *)0xFEED2A00000; 1714 iov[42].iov_len = 4096; 1715 iov[43].iov_base = (void *)0xFEED2B00000; 1716 iov[43].iov_len = 12288; 1717 iov[44].iov_base = (void *)0xFEED2C00000; 1718 iov[44].iov_len = 8192; 1719 iov[45].iov_base = (void *)0xFEED2F00000; 1720 iov[45].iov_len = 4096; 1721 iov[46].iov_base = (void *)0xFEED3000000; 1722 iov[46].iov_len = 4096; 1723 iov[47].iov_base = (void *)0xFEED3100000; 1724 iov[47].iov_len = 4096; 1725 iov[48].iov_base = (void *)0xFEED3200000; 1726 iov[48].iov_len = 24576; 1727 iov[49].iov_base = (void *)0xFEED3300000; 1728 iov[49].iov_len = 16384; 1729 iov[50].iov_base = (void *)0xFEED3400000; 1730 iov[50].iov_len = 12288; 1731 iov[51].iov_base = (void *)0xFEED3500000; 1732 iov[51].iov_len = 4096; 1733 iov[52].iov_base = (void *)0xFEED3600000; 1734 iov[52].iov_len = 4096; 1735 iov[53].iov_base = (void *)0xFEED3700000; 1736 iov[53].iov_len = 4096; 1737 iov[54].iov_base = (void *)0xFEED3800000; 1738 iov[54].iov_len = 28672; 1739 iov[55].iov_base = (void *)0xFEED3900000; 1740 iov[55].iov_len = 20480; 1741 iov[56].iov_base = (void *)0xFEED3A00000; 1742 iov[56].iov_len = 4096; 1743 iov[57].iov_base = (void *)0xFEED3B00000; 1744 iov[57].iov_len = 12288; 1745 iov[58].iov_base = (void *)0xFEED3C00000; 1746 iov[58].iov_len = 4096; 1747 iov[59].iov_base = (void *)0xFEED3D00000; 1748 iov[59].iov_len = 4096; 1749 iov[60].iov_base = (void *)0xFEED3E00000; 1750 iov[60].iov_len = 352; 1751 1752 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1753 * of child iovs, 1754 */ 1755 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1756 expected_io->md_buf = md_buf; 1757 for (i = 0; i < 32; i++) { 1758 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1759 } 1760 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1761 1762 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1763 * split by the IO boundary requirement. 
1764 */
1765 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
1766 expected_io->md_buf = md_buf + 126 * 8;
1767 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
1768 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
1769 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1770
1771 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
1772 * the first 864 bytes of iov[46] split by the IO boundary requirement.
1773 */
1774 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
1775 expected_io->md_buf = md_buf + 128 * 8;
1776 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
1777 iov[33].iov_len - 864);
1778 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
1779 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
1780 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
1781 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
1782 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
1783 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
1784 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
1785 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
1786 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
1787 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
1788 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
1789 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
1790 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
1791 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1792
1793 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
1794 * first 864 bytes of iov[52] split by the IO boundary requirement.
1795 */
1796 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
1797 expected_io->md_buf = md_buf + 256 * 8;
1798 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
1799 iov[46].iov_len - 864);
1800 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
1801 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
1802 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
1803 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
1804 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
1805 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
1806 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1807
1808 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
1809 * the first 4960 bytes of iov[57] split by the IO boundary requirement.
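 *
 * (Arithmetic check: 3232 + 4096 + 28672 + 20480 + 4096 + 4960 = 65536
 * bytes = 128 blocks, so the 5th child IO again fills a whole IO boundary
 * with 6 iov entries.)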
1810 */
1811 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
1812 expected_io->md_buf = md_buf + 384 * 8;
1813 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
1814 iov[52].iov_len - 864);
1815 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
1816 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
1817 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
1818 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
1819 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
1820 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1821
1822 /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
1823 * to the first 3936 bytes of iov[59] split by the capacity of child iovs.
1824 */
1825 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
1826 expected_io->md_buf = md_buf + 512 * 8;
1827 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
1828 iov[57].iov_len - 4960);
1829 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
1830 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
1831 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1832
1833 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
1834 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
1835 expected_io->md_buf = md_buf + 542 * 8;
1836 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
1837 iov[59].iov_len - 3936);
1838 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
1839 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1840
1841 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
1842 0, 543, io_done, NULL);
1843 CU_ASSERT(rc == 0);
1844 CU_ASSERT(g_io_done == false);
1845
1846 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1847 stub_complete_io(1);
1848 CU_ASSERT(g_io_done == false);
1849
1850 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1851 stub_complete_io(5);
1852 CU_ASSERT(g_io_done == false);
1853
1854 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1855 stub_complete_io(1);
1856 CU_ASSERT(g_io_done == true);
1857 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1858 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1859
1860 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
1861 * split, so test that.
1862 */
1863 bdev->optimal_io_boundary = 15;
1864 g_io_done = false;
1865 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1866 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1867
1868 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1869 CU_ASSERT(rc == 0);
1870 CU_ASSERT(g_io_done == false);
1871 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1872 stub_complete_io(1);
1873 CU_ASSERT(g_io_done == true);
1874
1875 /* Test an UNMAP. This should also not be split.
*/
1876 bdev->optimal_io_boundary = 16;
1877 g_io_done = false;
1878 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1879 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1880
1881 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1882 CU_ASSERT(rc == 0);
1883 CU_ASSERT(g_io_done == false);
1884 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1885 stub_complete_io(1);
1886 CU_ASSERT(g_io_done == true);
1887
1888 /* Test a FLUSH. This should also not be split. */
1889 bdev->optimal_io_boundary = 16;
1890 g_io_done = false;
1891 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1892 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1893
1894 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1895 CU_ASSERT(rc == 0);
1896 CU_ASSERT(g_io_done == false);
1897 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1898 stub_complete_io(1);
1899 CU_ASSERT(g_io_done == true);
1900
1901 /* Test a COPY. This should also not be split. */
1902 bdev->optimal_io_boundary = 15;
1903 g_io_done = false;
1904 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
1905 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1906
1907 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
1908 CU_ASSERT(rc == 0);
1909 CU_ASSERT(g_io_done == false);
1910 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1911 stub_complete_io(1);
1912 CU_ASSERT(g_io_done == true);
1913
1914 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1915
1916 /* Child requests return an error status */
1917 bdev->optimal_io_boundary = 16;
1918 iov[0].iov_base = (void *)0x10000;
1919 iov[0].iov_len = 512 * 64;
1920 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1921 g_io_done = false;
1922 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1923
1924 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1925 CU_ASSERT(rc == 0);
1926 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1927 stub_complete_io(4);
1928 CU_ASSERT(g_io_done == false);
1929 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1930 stub_complete_io(1);
1931 CU_ASSERT(g_io_done == true);
1932 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1933
1934 /* Test that a multi-vector command is terminated with failure before the splitting
1935 * process continues when one of its child I/Os fails.
1936 * The multi-vector command is the same as the one above that needs to be split by strip
1937 * and then needs to be split further due to the capacity of child iovs.
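 *
 * (Behavioral note: because the very first child IO completes with
 * SPDK_BDEV_IO_STATUS_FAILED, the parent stops splitting -- no further
 * children are submitted, and io_done observes the failure after a single
 * completion.)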
1938 */
1939 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1940 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1941 iov[i].iov_len = 512;
1942 }
1943 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1944 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1945
1946 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1947 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1948
1949 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1950 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1951
1952 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1953
1954 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1955 g_io_done = false;
1956 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1957
1958 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
1959 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1960 CU_ASSERT(rc == 0);
1961 CU_ASSERT(g_io_done == false);
1962
1963 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1964 stub_complete_io(1);
1965 CU_ASSERT(g_io_done == true);
1966 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1967
1968 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1969
1970 /* For this test we will create the following conditions to hit the code path where
1971 * we are trying to send an IO following a split that has no iovs because we had to
1972 * trim them for alignment reasons.
1973 *
1974 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1975 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1976 * position 30 and overshoot by 0x2e.
1977 * - That means we'll send the IO and loop back to pick up the remaining bytes at
1978 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e,
1979 * which eliminates that vector, so we just send the first split IO with 31 vectors
1980 * and let the completion pick up the last 2 vectors.
1981 */
1982 bdev->optimal_io_boundary = 32;
1983 bdev->split_on_optimal_io_boundary = true;
1984 g_io_done = false;
1985
1986 /* Init all parent IOVs to 0x212 */
1987 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1988 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1989 iov[i].iov_len = 0x212;
1990 }
1991
1992 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1993 SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1994 /* expect 0-29 to be 1:1 with the parent iov */
1995 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1996 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1997 }
1998
1999 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
2000 * where 0x2e is the amount we overshot the 16K boundary
2001 */
2002 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2003 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
2004 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2005
2006 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
2007 * shortened that takes it to the next boundary and then a final one to get us to
2008 * 0x4200 bytes for the IO.
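 *
 * (Arithmetic sketch: the 16K boundary is 32 blocks * 512 = 0x4000 bytes.
 * 31 full iovs would be 31 * 0x212 = 0x402e, overshooting by 0x2e, so
 * iov[30] is trimmed to 0x1e4 and the 1st child IO ends exactly at 0x4000.
 * The 2nd child IO is then 0x2e + 0x1d2 = 0x200 bytes, i.e. the final block.)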
2009 */
2010 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
2011 1, 2);
2012 /* position 30 picked up the remaining bytes to the next boundary */
2013 ut_expected_io_set_iov(expected_io, 0,
2014 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
2015
2016 /* position 31 picked up the rest of the transfer to get us to 0x4200 */
2017 ut_expected_io_set_iov(expected_io, 1,
2018 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
2019 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2020
2021 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
2022 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2023 CU_ASSERT(rc == 0);
2024 CU_ASSERT(g_io_done == false);
2025
2026 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2027 stub_complete_io(1);
2028 CU_ASSERT(g_io_done == false);
2029
2030 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2031 stub_complete_io(1);
2032 CU_ASSERT(g_io_done == true);
2033 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2034
2035 spdk_put_io_channel(io_ch);
2036 spdk_bdev_close(desc);
2037 free_bdev(bdev);
2038 ut_fini_bdev();
2039 }
2040
2041 static void
2042 bdev_io_max_size_and_segment_split_test(void)
2043 {
2044 struct spdk_bdev *bdev;
2045 struct spdk_bdev_desc *desc = NULL;
2046 struct spdk_io_channel *io_ch;
2047 struct spdk_bdev_opts bdev_opts = {};
2048 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2049 struct ut_expected_io *expected_io;
2050 uint64_t i;
2051 int rc;
2052
2053 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2054 bdev_opts.bdev_io_pool_size = 512;
2055 bdev_opts.bdev_io_cache_size = 64;
2056 bdev_opts.opts_size = sizeof(bdev_opts);
2057 ut_init_bdev(&bdev_opts);
2058
2059 bdev = allocate_bdev("bdev0");
2060
2061 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2062 CU_ASSERT(rc == 0);
2063 SPDK_CU_ASSERT_FATAL(desc != NULL);
2064 io_ch = spdk_bdev_get_io_channel(desc);
2065 CU_ASSERT(io_ch != NULL);
2066
2067 bdev->split_on_optimal_io_boundary = false;
2068 bdev->optimal_io_boundary = 0;
2069
2070 /* Case 0: max_num_segments == 0,
2071 * but the segment size of 2 * 512 > max_segment_size of 512.
2072 */
2073 bdev->max_segment_size = 512;
2074 bdev->max_num_segments = 0;
2075 g_io_done = false;
2076
2077 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2078 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2079 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2080 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2081
2082 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2083 CU_ASSERT(rc == 0);
2084 CU_ASSERT(g_io_done == false);
2085
2086 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2087 stub_complete_io(1);
2088 CU_ASSERT(g_io_done == true);
2089 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2090
2091 /* Case 1: max_segment_size == 0,
2092 * but the iov count of 2 > max_num_segments of 1.
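 *
 * (With max_num_segments = 1 each child IO may carry only one iov, so the
 * 9-block request is split at the iov boundary: a 1-block child for iov[0]
 * at offset 14 and an 8-block child for iov[1] at offset 15.)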
2093 */ 2094 bdev->max_segment_size = 0; 2095 bdev->max_num_segments = 1; 2096 g_io_done = false; 2097 2098 iov[0].iov_base = (void *)0x10000; 2099 iov[0].iov_len = 512; 2100 iov[1].iov_base = (void *)0x20000; 2101 iov[1].iov_len = 8 * 512; 2102 2103 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2104 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2105 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2106 2107 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2108 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2109 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2110 2111 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2112 CU_ASSERT(rc == 0); 2113 CU_ASSERT(g_io_done == false); 2114 2115 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2116 stub_complete_io(2); 2117 CU_ASSERT(g_io_done == true); 2118 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2119 2120 /* Test that a non-vector command is split correctly. 2121 * Set up the expected values before calling spdk_bdev_read_blocks 2122 */ 2123 bdev->max_segment_size = 512; 2124 bdev->max_num_segments = 1; 2125 g_io_done = false; 2126 2127 /* Child IO 0 */ 2128 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2129 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2130 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2131 2132 /* Child IO 1 */ 2133 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2134 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2135 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2136 2137 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2138 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2139 CU_ASSERT(rc == 0); 2140 CU_ASSERT(g_io_done == false); 2141 2142 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2143 stub_complete_io(2); 2144 CU_ASSERT(g_io_done == true); 2145 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2146 2147 /* Now set up a more complex, multi-vector command that needs to be split, 2148 * including splitting iovecs. 
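 *
 * (Sketch of the expected outcome: with max_segment_size = 2 * 512 and
 * max_num_segments = 1, the 12-block write built from iovs of 1024, 2048
 * and 3072 bytes is cut into six 2-block, single-iov children at offsets
 * 14, 16, 18, 20, 22 and 24.)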
2149 */
2150 bdev->max_segment_size = 2 * 512;
2151 bdev->max_num_segments = 1;
2152 g_io_done = false;
2153
2154 iov[0].iov_base = (void *)0x10000;
2155 iov[0].iov_len = 2 * 512;
2156 iov[1].iov_base = (void *)0x20000;
2157 iov[1].iov_len = 4 * 512;
2158 iov[2].iov_base = (void *)0x30000;
2159 iov[2].iov_len = 6 * 512;
2160
2161 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2162 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
2163 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2164
2165 /* Split iov[1] into 2 iov entries, then split the segments */
2166 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2167 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
2168 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2169
2170 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
2171 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
2172 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2173
2174 /* Split iov[2] into 3 iov entries, then split the segments */
2175 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
2176 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
2177 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2178
2179 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
2180 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
2181 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2182
2183 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
2184 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
2185 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2186
2187 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
2188 CU_ASSERT(rc == 0);
2189 CU_ASSERT(g_io_done == false);
2190
2191 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
2192 stub_complete_io(6);
2193 CU_ASSERT(g_io_done == true);
2194 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2195
2196 /* Test a multi-vector command that needs to be split by strip and then needs to be
2197 * split further due to the capacity of the parent IO's child iovs.
2198 */
2199 bdev->max_segment_size = 512;
2200 bdev->max_num_segments = 1;
2201 g_io_done = false;
2202
2203 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2204 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2205 iov[i].iov_len = 512 * 2;
2206 }
2207
2208 /* Each input iov is split into 2 iovs;
2209 * half of the input iovs fill all child iov entries of a single child IO.
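 *
 * (Arithmetic sketch: 32 parent iovs * 1024 bytes = 64 blocks. Each parent
 * iov yields two 512-byte, single-iov children, i.e. 64 child IOs in total,
 * submitted in two rounds of SPDK_BDEV_IO_NUM_CHILD_IOV = 32 each.)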
2210 */
2211 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
2212 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
2213 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2214 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2215
2216 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
2217 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2218 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2219 }
2220
2221 /* The remaining iovs are split in the second round */
2222 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2223 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
2224 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2225 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2226
2227 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
2228 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2229 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2230 }
2231
2232 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2233 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
2234 CU_ASSERT(rc == 0);
2235 CU_ASSERT(g_io_done == false);
2236
2237 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2238 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2239 CU_ASSERT(g_io_done == false);
2240
2241 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2242 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2243 CU_ASSERT(g_io_done == true);
2244 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2245
2246 /* An error case: a child IO produced by the split does
2247 * not meet the requirement of being a multiple of the block size,
2248 * and exits with an error.
2249 */
2250 bdev->max_segment_size = 512;
2251 bdev->max_num_segments = 1;
2252 g_io_done = false;
2253
2254 iov[0].iov_base = (void *)0x10000;
2255 iov[0].iov_len = 512 + 256;
2256 iov[1].iov_base = (void *)0x20000;
2257 iov[1].iov_len = 256;
2258
2259 /* iov[0] is split into 512 and 256 bytes.
2260 * 256 is less than the block size, and the next round of splitting
2261 * finds that the first child IO it builds is smaller than
2262 * the block size, so the request exits with an error.
2263 */
2264 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2265 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2266 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2267
2268 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2269 CU_ASSERT(rc == 0);
2270 CU_ASSERT(g_io_done == false);
2271
2272 /* First child IO is OK */
2273 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2274 stub_complete_io(1);
2275 CU_ASSERT(g_io_done == true);
2276 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2277
2278 /* error exit */
2279 stub_complete_io(1);
2280 CU_ASSERT(g_io_done == true);
2281 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2282 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2283
2284 /* Test a multi-vector command that needs to be split by strip and then needs to be
2285 * split further due to the capacity of child iovs.
2286 *
2287 * In this case, the last two iovs need to be split, but that would exceed the capacity
2288 * of child iovs, so they must wait until the first batch has completed.
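 *
 * (Capacity sketch: 30 single-entry iovs plus iov[30] split into two
 * 512-byte halves consume exactly 32 child iov entries for the 32-block
 * 1st child IO; iov[31]'s two halves then form the 2-block 2nd child IO.)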
2289 */
2290 bdev->max_segment_size = 512;
2291 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2292 g_io_done = false;
2293
2294 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2295 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2296 iov[i].iov_len = 512;
2297 }
2298 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2299 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2300 iov[i].iov_len = 512 * 2;
2301 }
2302
2303 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2304 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
2305 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
2306 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2307 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2308 }
2309 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
2310 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2311 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2312 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2313
2314 /* Child iov entries exceed the max num of the parent IO, so split it in the next round */
2315 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
2316 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2317 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2318 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2319
2320 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2321 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2322 CU_ASSERT(rc == 0);
2323 CU_ASSERT(g_io_done == false);
2324
2325 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2326 stub_complete_io(1);
2327 CU_ASSERT(g_io_done == false);
2328
2329 /* Next round */
2330 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2331 stub_complete_io(1);
2332 CU_ASSERT(g_io_done == true);
2333 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2334
2335 /* This case is similar to the previous one, but the last few child iov
2336 * entries do not add up to a full blocklen, so they cannot be put into
2337 * this IO and must wait until the next one.
2338 */
2339 bdev->max_segment_size = 512;
2340 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2341 g_io_done = false;
2342
2343 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2344 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2345 iov[i].iov_len = 512;
2346 }
2347
2348 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2349 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2350 iov[i].iov_len = 128;
2351 }
2352
2353 /* The first child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2354 * because the 2 iovs left over are not enough for a full block.
2355 */
2356 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2357 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
2358 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2359 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2360 }
2361 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2362
2363 /* The second child IO waits until the end of the first child IO before executing,
2364 * because the combined iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
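 * Only two child iov slots remain after the thirty 512-byte entries, and
 * 128 + 128 = 256 bytes is less than one block, so the four 128-byte iovs
 * are all deferred to the 2nd child IO, which covers parent iovs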
2365 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2
2366 */
2367 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2368 1, 4);
2369 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2370 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2371 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2372 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2373 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2374
2375 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2376 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2377 CU_ASSERT(rc == 0);
2378 CU_ASSERT(g_io_done == false);
2379
2380 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2381 stub_complete_io(1);
2382 CU_ASSERT(g_io_done == false);
2383
2384 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2385 stub_complete_io(1);
2386 CU_ASSERT(g_io_done == true);
2387 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2388
2389 /* A very complicated case. Each sg entry exceeds max_segment_size and
2390 * needs to be split. At the same time, each child IO must be a multiple of
2391 * the blocklen, and the child iovcnt exceeds the parent iovcnt.
2392 */
2393 bdev->max_segment_size = 512 + 128;
2394 bdev->max_num_segments = 3;
2395 g_io_done = false;
2396
2397 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2398 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2399 iov[i].iov_len = 512 + 256;
2400 }
2401
2402 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2403 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2404 iov[i].iov_len = 512 + 128;
2405 }
2406
2407 /* Child IOs use 9 child iov entries per for() round, 3 * 9 = 27 entries in total.
2408 * Each round consumes 4 parent IO iov entries and 6 blocks,
2409 * generating 3 child IOs per round, 9 in all.
2410 */
2411 for (i = 0; i < 3; i++) {
2412 uint32_t j = i * 4;
2413 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2414 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2415 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2416 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2417 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2418
2419 /* Child io must be a multiple of blocklen
2420 * iov[j + 2] must be split. If the third entry is also added,
2421 * the multiple of blocklen cannot be guaranteed. But it still
2422 * occupies one iov entry of the parent child iov.
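 *
 * (Per-round arithmetic: 640 + 128 + 256 = 1024 bytes for the 1st child IO,
 * 512 + 512 = 1024 for the 2nd and 256 + 640 + 128 = 1024 for the 3rd --
 * three 2-block children consuming 4 parent iovs of 768 bytes = 6 blocks.)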
2423 */
2424 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2425 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2426 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2427 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2428
2429 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2430 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2431 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2432 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2433 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2434 }
2435
2436 /* Child iov position at 27, the 10th child IO;
2437 * its iov entry index is 3 * 4 and its offset is 3 * 6
2438 */
2439 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2440 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2441 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2442 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2443 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2444
2445 /* Child iov position at 30, the 11th child IO */
2446 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2447 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2448 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2449 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2450
2451 /* The 2nd split round and iovpos is 0, the 12th child IO */
2452 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2453 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2454 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2455 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2456 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2457
2458 /* Generate 9 more child IOs, consuming 27 child iov entries.
2459 * Each for() round consumes 4 parent IO iov entries and 6 blocks.
2460 * The parent IO iov index starts from 16 and the block offset starts from 24.
2461 */
2462 for (i = 0; i < 3; i++) {
2463 uint32_t j = i * 4 + 16;
2464 uint32_t offset = i * 6 + 24;
2465 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2466 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2467 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2468 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2469 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2470
2471 /* Child io must be a multiple of blocklen
2472 * iov[j + 2] must be split. If the third entry is also added,
2473 * the multiple of blocklen cannot be guaranteed. But it still
2474 * occupies one iov entry of the parent child iov.
2475 */
2476 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2477 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2478 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2479 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2480
2481 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2482 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2483 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2484 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2485 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2486 }
2487
2488 /* The 22nd child IO, child iov position at 30 */
2489 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2490 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2491 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2492
2493 /* The third round */
2494 /* Here is the 23rd child IO and child iovpos is 0 */
2495 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2496 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2497 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2498 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2499 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2500
2501 /* The 24th child IO */
2502 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2503 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2504 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2505 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2506 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2507
2508 /* The 25th child IO */
2509 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2510 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2511 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2512 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2513
2514 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2515 50, io_done, NULL);
2516 CU_ASSERT(rc == 0);
2517 CU_ASSERT(g_io_done == false);
2518
2519 /* Parent IO supports up to 32 child iovs, so it is calculated that
2520 * a maximum of 11 IOs can be split at a time, and the
2521 * splitting will continue after the first batch is over.
2522 */
2523 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2524 stub_complete_io(11);
2525 CU_ASSERT(g_io_done == false);
2526
2527 /* The 2nd round */
2528 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2529 stub_complete_io(11);
2530 CU_ASSERT(g_io_done == false);
2531
2532 /* The last round */
2533 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2534 stub_complete_io(3);
2535 CU_ASSERT(g_io_done == true);
2536 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2537
2538 /* Test a WRITE_ZEROES. This should also not be split.
*/ 2539 bdev->max_segment_size = 512; 2540 bdev->max_num_segments = 1; 2541 g_io_done = false; 2542 2543 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2544 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2545 2546 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2547 CU_ASSERT(rc == 0); 2548 CU_ASSERT(g_io_done == false); 2549 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2550 stub_complete_io(1); 2551 CU_ASSERT(g_io_done == true); 2552 2553 /* Test an UNMAP. This should also not be split. */ 2554 g_io_done = false; 2555 2556 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2557 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2558 2559 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2560 CU_ASSERT(rc == 0); 2561 CU_ASSERT(g_io_done == false); 2562 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2563 stub_complete_io(1); 2564 CU_ASSERT(g_io_done == true); 2565 2566 /* Test a FLUSH. This should also not be split. */ 2567 g_io_done = false; 2568 2569 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2570 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2571 2572 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL); 2573 CU_ASSERT(rc == 0); 2574 CU_ASSERT(g_io_done == false); 2575 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2576 stub_complete_io(1); 2577 CU_ASSERT(g_io_done == true); 2578 2579 /* Test a COPY. This should also not be split. */ 2580 g_io_done = false; 2581 2582 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36); 2583 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2584 2585 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL); 2586 CU_ASSERT(rc == 0); 2587 CU_ASSERT(g_io_done == false); 2588 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2589 stub_complete_io(1); 2590 CU_ASSERT(g_io_done == true); 2591 2592 /* Test that IOs are split on max_rw_size */ 2593 bdev->max_rw_size = 2; 2594 bdev->max_segment_size = 0; 2595 bdev->max_num_segments = 0; 2596 g_io_done = false; 2597 2598 /* 5 blocks in a contiguous buffer */ 2599 iov[0].iov_base = (void *)0x10000; 2600 iov[0].iov_len = 5 * 512; 2601 2602 /* First: offset=0, num_blocks=2 */ 2603 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1); 2604 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512); 2605 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2606 /* Second: offset=2, num_blocks=2 */ 2607 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1); 2608 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512); 2609 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2610 /* Third: offset=4, num_blocks=1 */ 2611 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1); 2612 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512); 2613 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2614 2615 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL); 2616 CU_ASSERT(rc == 0); 2617 CU_ASSERT(g_io_done == false); 2618 2619 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2620 stub_complete_io(3); 2621 CU_ASSERT(g_io_done == true); 2622 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2623 2624 /* Check splitting on both max_rw_size + max_num_segments */ 2625 
bdev->max_rw_size = 2;
2626 bdev->max_num_segments = 2;
2627 bdev->max_segment_size = 0;
2628 g_io_done = false;
2629
2630 /* 5 blocks split across 4 iovs */
2631 iov[0].iov_base = (void *)0x10000;
2632 iov[0].iov_len = 3 * 512;
2633 iov[1].iov_base = (void *)0x20000;
2634 iov[1].iov_len = 256;
2635 iov[2].iov_base = (void *)0x30000;
2636 iov[2].iov_len = 256;
2637 iov[3].iov_base = (void *)0x40000;
2638 iov[3].iov_len = 512;
2639
2640 /* First: offset=0, num_blocks=2, iovcnt=1 */
2641 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2642 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2643 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2644 /* Second: offset=2, num_blocks=1, iovcnt=1 (max_num_segments prevents submitting
2645 * the rest of iov[0] together with iov[1]+iov[2])
2646 */
2647 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1);
2648 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512);
2649 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2650 /* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */
2651 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2);
2652 ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256);
2653 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256);
2654 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2655 /* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */
2656 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2657 ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512);
2658 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2659
2660 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL);
2661 CU_ASSERT(rc == 0);
2662 CU_ASSERT(g_io_done == false);
2663
2664 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
2665 stub_complete_io(4);
2666 CU_ASSERT(g_io_done == true);
2667 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2668
2669 /* Check splitting on both max_rw_size + max_segment_size */
2670 bdev->max_rw_size = 2;
2671 bdev->max_segment_size = 512;
2672 bdev->max_num_segments = 0;
2673 g_io_done = false;
2674
2675 /* 6 blocks in a contiguous buffer */
2676 iov[0].iov_base = (void *)0x10000;
2677 iov[0].iov_len = 6 * 512;
2678
2679 /* We expect 3 IOs each with 2 blocks and 2 iovs */
2680 for (i = 0; i < 3; ++i) {
2681 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2);
2682 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512);
2683 ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512);
2684 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2685 }
2686
2687 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL);
2688 CU_ASSERT(rc == 0);
2689 CU_ASSERT(g_io_done == false);
2690
2691 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2692 stub_complete_io(3);
2693 CU_ASSERT(g_io_done == true);
2694 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2695
2696 /* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */
2697 bdev->max_rw_size = 1;
2698 bdev->max_segment_size = 0;
2699 bdev->max_num_segments = 0;
2700 g_io_done = false;
2701
2702 /* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */
2703 iov[0].iov_base = (void *)0x10000;
2704 iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512;
2705
2706 /* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */
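/* (Batching note: with max_rw_size = 1 every block becomes its own child IO,
 * but at most SPDK_BDEV_IO_NUM_CHILD_IOV children are in flight per split
 * round, so 32 single-block children are submitted first and the last one
 * follows once they complete.) */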
2707 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 1; ++i) {
2708 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1);
2709 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512);
2710 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2711 }
2712
2713 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2714 CU_ASSERT(rc == 0);
2715 CU_ASSERT(g_io_done == false);
2716
2717 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2718 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2719 CU_ASSERT(g_io_done == false);
2720 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2721 stub_complete_io(1);
2722 CU_ASSERT(g_io_done == true);
2723 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2724
2725 spdk_put_io_channel(io_ch);
2726 spdk_bdev_close(desc);
2727 free_bdev(bdev);
2728 ut_fini_bdev();
2729 }
2730
2731 static void
2732 bdev_io_mix_split_test(void)
2733 {
2734 struct spdk_bdev *bdev;
2735 struct spdk_bdev_desc *desc = NULL;
2736 struct spdk_io_channel *io_ch;
2737 struct spdk_bdev_opts bdev_opts = {};
2738 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2739 struct ut_expected_io *expected_io;
2740 uint64_t i;
2741 int rc;
2742
2743 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2744 bdev_opts.bdev_io_pool_size = 512;
2745 bdev_opts.bdev_io_cache_size = 64;
2746 ut_init_bdev(&bdev_opts);
2747
2748 bdev = allocate_bdev("bdev0");
2749
2750 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2751 CU_ASSERT(rc == 0);
2752 SPDK_CU_ASSERT_FATAL(desc != NULL);
2753 io_ch = spdk_bdev_get_io_channel(desc);
2754 CU_ASSERT(io_ch != NULL);
2755
2756 /* First case: optimal_io_boundary == max_segment_size * max_num_segments */
2757 bdev->split_on_optimal_io_boundary = true;
2758 bdev->optimal_io_boundary = 16;
2759
2760 bdev->max_segment_size = 512;
2761 bdev->max_num_segments = 16;
2762 g_io_done = false;
2763
2764 /* An IO crossing the IO boundary requires a split.
2765 * Total 2 child IOs.
2766 */
2767
2768 /* The 1st child IO splits its payload into multiple segment entries */
2769 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2770 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2771 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2772 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2773
2774 /* The 2nd child IO splits its payload into multiple segment entries */
2775 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
2776 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
2777 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
2778 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2779
2780 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
2781 CU_ASSERT(rc == 0);
2782 CU_ASSERT(g_io_done == false);
2783
2784 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2785 stub_complete_io(2);
2786 CU_ASSERT(g_io_done == true);
2787 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2788
2789 /* Second case: optimal_io_boundary > max_segment_size * max_num_segments */
2790 bdev->max_segment_size = 15 * 512;
2791 bdev->max_num_segments = 1;
2792 g_io_done = false;
2793
2794 /* An IO crossing the IO boundary requires a split.
2795 * The 1st child IO's segment size exceeds max_segment_size,
2796 * so the 1st child IO will be split into multiple segment entries.
2797 * Then it is split into 2 child IOs because of max_num_segments.
2798 * Total 3 child IOs.
2799 */
2800
2801 /* The first 2 IOs are within one IO boundary.
2802 * Because optimal_io_boundary > max_segment_size * max_num_segments,
2803 * it is split into the first 2 IOs.
2804 */
2805 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
2806 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
2807 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2808
2809 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2810 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
2811 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2812
2813 /* The 3rd child IO is created by the IO boundary */
2814 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2815 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2816 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2817
2818 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2819 CU_ASSERT(rc == 0);
2820 CU_ASSERT(g_io_done == false);
2821
2822 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2823 stub_complete_io(3);
2824 CU_ASSERT(g_io_done == true);
2825 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2826
2827 /* Third case: optimal_io_boundary < max_segment_size * max_num_segments */
2828 bdev->max_segment_size = 17 * 512;
2829 bdev->max_num_segments = 1;
2830 g_io_done = false;
2831
2832 /* An IO crossing the IO boundary requires a split.
2833 * The child IOs do not split further.
2834 * Total 2 child IOs.
2835 */
2836
2837 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
2838 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
2839 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2840
2841 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2842 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2843 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2844
2845 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2846 CU_ASSERT(rc == 0);
2847 CU_ASSERT(g_io_done == false);
2848
2849 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2850 stub_complete_io(2);
2851 CU_ASSERT(g_io_done == true);
2852 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2853
2854 /* Now set up a more complex, multi-vector command that needs to be split,
2855 * including splitting iovecs.
2856 * optimal_io_boundary < max_segment_size * max_num_segments
2857 */
2858 bdev->max_segment_size = 3 * 512;
2859 bdev->max_num_segments = 6;
2860 g_io_done = false;
2861
2862 iov[0].iov_base = (void *)0x10000;
2863 iov[0].iov_len = 4 * 512;
2864 iov[1].iov_base = (void *)0x20000;
2865 iov[1].iov_len = 4 * 512;
2866 iov[2].iov_base = (void *)0x30000;
2867 iov[2].iov_len = 10 * 512;
2868
2869 /* An IO crossing the IO boundary requires a split.
2870 * The 1st child IO's segment size exceeds max_segment_size and, after
2871 * splitting the segments, the segment count exceeds max_num_segments,
2872 * so the 1st child IO will be split into 2 child IOs.
2873 * Total 3 child IOs.
2874 */
2875
2876 /* The first 2 IOs are within one IO boundary.
2877 * After splitting the segment size, the segment count exceeds the limit,
2878 * so it splits into 2 child IOs.
2879 */ 2880 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2881 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2882 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2883 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2884 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2885 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2886 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2887 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2888 2889 /* The 2nd child IO has the left segment entry */ 2890 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2891 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2892 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2893 2894 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2895 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2896 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2897 2898 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2899 CU_ASSERT(rc == 0); 2900 CU_ASSERT(g_io_done == false); 2901 2902 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2903 stub_complete_io(3); 2904 CU_ASSERT(g_io_done == true); 2905 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2906 2907 /* A very complicated case. Each sg entry exceeds max_segment_size 2908 * and split on io boundary. 2909 * optimal_io_boundary < max_segment_size * max_num_segments 2910 */ 2911 bdev->max_segment_size = 3 * 512; 2912 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV; 2913 g_io_done = false; 2914 2915 for (i = 0; i < 20; i++) { 2916 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2917 iov[i].iov_len = 512 * 4; 2918 } 2919 2920 /* IO crossing the IO boundary requires split. 2921 * 80 block length can split 5 child IOs base on offset and IO boundary. 2922 * Each iov entry needs to be split to 2 entries because of max_segment_size 2923 * Total 5 child IOs. 2924 */ 2925 2926 /* 4 iov entries are in an IO boundary and each iov entry splits to 2. 2927 * So each child IO occupies 8 child iov entries. 
2928 */ 2929 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2930 for (i = 0; i < 4; i++) { 2931 int iovcnt = i * 2; 2932 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2933 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2934 } 2935 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2936 2937 /* 2nd child IO and total 16 child iov entries of parent IO */ 2938 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2939 for (i = 4; i < 8; i++) { 2940 int iovcnt = (i - 4) * 2; 2941 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2942 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2943 } 2944 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2945 2946 /* 3rd child IO and total 24 child iov entries of parent IO */ 2947 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2948 for (i = 8; i < 12; i++) { 2949 int iovcnt = (i - 8) * 2; 2950 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2951 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2952 } 2953 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2954 2955 /* 4th child IO and total 32 child iov entries of parent IO */ 2956 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2957 for (i = 12; i < 16; i++) { 2958 int iovcnt = (i - 12) * 2; 2959 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2960 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2961 } 2962 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2963 2964 /* 5th child IO and because of the child iov entry it should be split 2965 * in next round. 
2966 */ 2967 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2968 for (i = 16; i < 20; i++) { 2969 int iovcnt = (i - 16) * 2; 2970 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2971 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2972 } 2973 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2974 2975 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2976 CU_ASSERT(rc == 0); 2977 CU_ASSERT(g_io_done == false); 2978 2979 /* First split round */ 2980 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2981 stub_complete_io(4); 2982 CU_ASSERT(g_io_done == false); 2983 2984 /* Second split round */ 2985 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2986 stub_complete_io(1); 2987 CU_ASSERT(g_io_done == true); 2988 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2989 2990 spdk_put_io_channel(io_ch); 2991 spdk_bdev_close(desc); 2992 free_bdev(bdev); 2993 ut_fini_bdev(); 2994 } 2995 2996 static void 2997 bdev_io_split_with_io_wait(void) 2998 { 2999 struct spdk_bdev *bdev; 3000 struct spdk_bdev_desc *desc = NULL; 3001 struct spdk_io_channel *io_ch; 3002 struct spdk_bdev_channel *channel; 3003 struct spdk_bdev_mgmt_channel *mgmt_ch; 3004 struct spdk_bdev_opts bdev_opts = {}; 3005 struct iovec iov[3]; 3006 struct ut_expected_io *expected_io; 3007 int rc; 3008 3009 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3010 bdev_opts.bdev_io_pool_size = 2; 3011 bdev_opts.bdev_io_cache_size = 1; 3012 ut_init_bdev(&bdev_opts); 3013 3014 bdev = allocate_bdev("bdev0"); 3015 3016 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3017 CU_ASSERT(rc == 0); 3018 CU_ASSERT(desc != NULL); 3019 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3020 io_ch = spdk_bdev_get_io_channel(desc); 3021 CU_ASSERT(io_ch != NULL); 3022 channel = spdk_io_channel_get_ctx(io_ch); 3023 mgmt_ch = channel->shared_resource->mgmt_ch; 3024 3025 bdev->optimal_io_boundary = 16; 3026 bdev->split_on_optimal_io_boundary = true; 3027 3028 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 3029 CU_ASSERT(rc == 0); 3030 3031 /* Now test that a single-vector command is split correctly. 3032 * Offset 14, length 8, payload 0xF000 3033 * Child - Offset 14, length 2, payload 0xF000 3034 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3035 * 3036 * Set up the expected values before calling spdk_bdev_read_blocks 3037 */ 3038 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 3039 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 3040 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3041 3042 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 3043 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 3044 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3045 3046 /* The following children will be submitted sequentially due to the capacity of 3047 * spdk_bdev_io. 
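 *
 * (Resource note: this test configured bdev_io_pool_size = 2 with a cache
 * size of 1, so while the parent holds one spdk_bdev_io only one more is
 * available for a child at a time; each child must complete and return its
 * spdk_bdev_io before the next child can be submitted.)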
3048 */ 3049 3050 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 3051 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 3052 CU_ASSERT(rc == 0); 3053 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3054 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3055 3056 /* Completing the first read I/O will submit the first child */ 3057 stub_complete_io(1); 3058 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3059 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3060 3061 /* Completing the first child will submit the second child */ 3062 stub_complete_io(1); 3063 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3064 3065 /* Complete the second child I/O. This should result in our callback getting 3066 * invoked since the parent I/O is now complete. 3067 */ 3068 stub_complete_io(1); 3069 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3070 3071 /* Now set up a more complex, multi-vector command that needs to be split, 3072 * including splitting iovecs. 3073 */ 3074 iov[0].iov_base = (void *)0x10000; 3075 iov[0].iov_len = 512; 3076 iov[1].iov_base = (void *)0x20000; 3077 iov[1].iov_len = 20 * 512; 3078 iov[2].iov_base = (void *)0x30000; 3079 iov[2].iov_len = 11 * 512; 3080 3081 g_io_done = false; 3082 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 3083 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 3084 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 3085 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3086 3087 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 3088 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 3089 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3090 3091 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 3092 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 3093 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 3094 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3095 3096 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 3097 CU_ASSERT(rc == 0); 3098 CU_ASSERT(g_io_done == false); 3099 3100 /* The following children will be submitted sequentially due to the capacity of 3101 * spdk_bdev_io. 3102 */ 3103 3104 /* Completing the first child will submit the second child */ 3105 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3106 stub_complete_io(1); 3107 CU_ASSERT(g_io_done == false); 3108 3109 /* Completing the second child will submit the third child */ 3110 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3111 stub_complete_io(1); 3112 CU_ASSERT(g_io_done == false); 3113 3114 /* Completing the third child will result in our callback getting invoked 3115 * since the parent I/O is now complete. 
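* (2 + 16 + 14 blocks: all 32 blocks of the parent write are then accounted for.)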
3116 */ 3117 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3118 stub_complete_io(1); 3119 CU_ASSERT(g_io_done == true); 3120 3121 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 3122 3123 spdk_put_io_channel(io_ch); 3124 spdk_bdev_close(desc); 3125 free_bdev(bdev); 3126 ut_fini_bdev(); 3127 } 3128 3129 static void 3130 bdev_io_write_unit_split_test(void) 3131 { 3132 struct spdk_bdev *bdev; 3133 struct spdk_bdev_desc *desc = NULL; 3134 struct spdk_io_channel *io_ch; 3135 struct spdk_bdev_opts bdev_opts = {}; 3136 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 3137 struct ut_expected_io *expected_io; 3138 uint64_t i; 3139 int rc; 3140 3141 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3142 bdev_opts.bdev_io_pool_size = 512; 3143 bdev_opts.bdev_io_cache_size = 64; 3144 ut_init_bdev(&bdev_opts); 3145 3146 bdev = allocate_bdev("bdev0"); 3147 3148 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 3149 CU_ASSERT(rc == 0); 3150 SPDK_CU_ASSERT_FATAL(desc != NULL); 3151 io_ch = spdk_bdev_get_io_channel(desc); 3152 CU_ASSERT(io_ch != NULL); 3153 3154 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 3155 bdev->write_unit_size = 32; 3156 bdev->split_on_write_unit = true; 3157 g_io_done = false; 3158 3159 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 3160 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 3161 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3162 3163 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 3164 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 3165 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3166 3167 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3168 CU_ASSERT(rc == 0); 3169 CU_ASSERT(g_io_done == false); 3170 3171 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3172 stub_complete_io(2); 3173 CU_ASSERT(g_io_done == true); 3174 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3175 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3176 3177 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 3178 * based on write_unit_size, not optimal_io_boundary */ 3179 bdev->split_on_optimal_io_boundary = true; 3180 bdev->optimal_io_boundary = 16; 3181 g_io_done = false; 3182 3183 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3184 CU_ASSERT(rc == 0); 3185 CU_ASSERT(g_io_done == false); 3186 3187 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3188 stub_complete_io(2); 3189 CU_ASSERT(g_io_done == true); 3190 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3191 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3192 3193 /* Write I/O should fail if it is smaller than write_unit_size */ 3194 g_io_done = false; 3195 3196 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3197 CU_ASSERT(rc == 0); 3198 CU_ASSERT(g_io_done == false); 3199 3200 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3201 poll_threads(); 3202 CU_ASSERT(g_io_done == true); 3203 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3204 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3205 3206 /* Same for I/O not aligned to write_unit_size */ 3207 g_io_done = false; 3208 3209 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3210 CU_ASSERT(rc == 0); 3211 CU_ASSERT(g_io_done == false); 
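/* The misaligned write is rejected inside the bdev layer: it never reaches the driver stub, and the failed completion is delivered asynchronously, hence the poll_threads() below. */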
3212 3213 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3214 poll_threads(); 3215 CU_ASSERT(g_io_done == true); 3216 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3217 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3218 3219 /* Write should fail if it needs to be split but there are not enough iovs to submit 3220 * an entire write unit */ 3221 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3222 g_io_done = false; 3223 3224 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3225 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3226 iov[i].iov_len = 512; 3227 } 3228 3229 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3230 io_done, NULL); 3231 CU_ASSERT(rc == 0); 3232 CU_ASSERT(g_io_done == false); 3233 3234 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3235 poll_threads(); 3236 CU_ASSERT(g_io_done == true); 3237 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3238 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3239 3240 spdk_put_io_channel(io_ch); 3241 spdk_bdev_close(desc); 3242 free_bdev(bdev); 3243 ut_fini_bdev(); 3244 } 3245 3246 static void 3247 bdev_io_alignment(void) 3248 { 3249 struct spdk_bdev *bdev; 3250 struct spdk_bdev_desc *desc = NULL; 3251 struct spdk_io_channel *io_ch; 3252 struct spdk_bdev_opts bdev_opts = {}; 3253 int rc; 3254 void *buf = NULL; 3255 struct iovec iovs[2]; 3256 int iovcnt; 3257 uint64_t alignment; 3258 3259 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3260 bdev_opts.bdev_io_pool_size = 20; 3261 bdev_opts.bdev_io_cache_size = 2; 3262 ut_init_bdev(&bdev_opts); 3263 3264 fn_table.submit_request = stub_submit_request_get_buf; 3265 bdev = allocate_bdev("bdev0"); 3266 3267 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3268 CU_ASSERT(rc == 0); 3269 CU_ASSERT(desc != NULL); 3270 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3271 io_ch = spdk_bdev_get_io_channel(desc); 3272 CU_ASSERT(io_ch != NULL); 3273 3274 /* Create aligned buffer */ 3275 rc = posix_memalign(&buf, 4096, 8192); 3276 SPDK_CU_ASSERT_FATAL(rc == 0); 3277 3278 /* Pass aligned single buffer with no alignment required */ 3279 alignment = 1; 3280 bdev->required_alignment = spdk_u32log2(alignment); 3281 3282 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3283 CU_ASSERT(rc == 0); 3284 stub_complete_io(1); 3285 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3286 alignment)); 3287 3288 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3289 CU_ASSERT(rc == 0); 3290 stub_complete_io(1); 3291 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3292 alignment)); 3293 3294 /* Pass unaligned single buffer with no alignment required */ 3295 alignment = 1; 3296 bdev->required_alignment = spdk_u32log2(alignment); 3297 3298 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3299 CU_ASSERT(rc == 0); 3300 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3301 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3302 stub_complete_io(1); 3303 3304 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3305 CU_ASSERT(rc == 0); 3306 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3307 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3308 stub_complete_io(1); 3309 3310 /* Pass unaligned single buffer with 512 alignment required */ 3311 alignment = 512; 3312 bdev->required_alignment = spdk_u32log2(alignment); 3313 3314 rc = 
spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3315 CU_ASSERT(rc == 0); 3316 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3317 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3318 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3319 alignment)); 3320 stub_complete_io(1); 3321 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3322 3323 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3324 CU_ASSERT(rc == 0); 3325 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3326 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3327 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3328 alignment)); 3329 stub_complete_io(1); 3330 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3331 3332 /* Pass unaligned single buffer with 4096 alignment required */ 3333 alignment = 4096; 3334 bdev->required_alignment = spdk_u32log2(alignment); 3335 3336 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3337 CU_ASSERT(rc == 0); 3338 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3339 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3340 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3341 alignment)); 3342 stub_complete_io(1); 3343 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3344 3345 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3346 CU_ASSERT(rc == 0); 3347 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3348 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3349 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3350 alignment)); 3351 stub_complete_io(1); 3352 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3353 3354 /* Pass aligned iovs with no alignment required */ 3355 alignment = 1; 3356 bdev->required_alignment = spdk_u32log2(alignment); 3357 3358 iovcnt = 1; 3359 iovs[0].iov_base = buf; 3360 iovs[0].iov_len = 512; 3361 3362 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3363 CU_ASSERT(rc == 0); 3364 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3365 stub_complete_io(1); 3366 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3367 3368 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3369 CU_ASSERT(rc == 0); 3370 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3371 stub_complete_io(1); 3372 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3373 3374 /* Pass unaligned iovs with no alignment required */ 3375 alignment = 1; 3376 bdev->required_alignment = spdk_u32log2(alignment); 3377 3378 iovcnt = 2; 3379 iovs[0].iov_base = buf + 16; 3380 iovs[0].iov_len = 256; 3381 iovs[1].iov_base = buf + 16 + 256 + 32; 3382 iovs[1].iov_len = 256; 3383 3384 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3385 CU_ASSERT(rc == 0); 3386 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3387 stub_complete_io(1); 3388 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3389 3390 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3391 CU_ASSERT(rc == 0); 3392 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3393 stub_complete_io(1); 3394 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3395 3396 /* Pass unaligned iov with 2048 alignment required */ 
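/* Neither iovec is 2048-byte aligned, so the bdev layer is expected to coalesce them into the single aligned bounce buffer (internal.bounce_buf.iov) and release it once the I/O completes, as the assertions below verify. */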
3397 alignment = 2048; 3398 bdev->required_alignment = spdk_u32log2(alignment); 3399 3400 iovcnt = 2; 3401 iovs[0].iov_base = buf + 16; 3402 iovs[0].iov_len = 256; 3403 iovs[1].iov_base = buf + 16 + 256 + 32; 3404 iovs[1].iov_len = 256; 3405 3406 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3407 CU_ASSERT(rc == 0); 3408 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt); 3409 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3410 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3411 alignment)); 3412 stub_complete_io(1); 3413 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3414 3415 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3416 CU_ASSERT(rc == 0); 3417 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt); 3418 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3419 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3420 alignment)); 3421 stub_complete_io(1); 3422 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3423 3424 /* Pass iov without allocated buffer without alignment required */ 3425 alignment = 1; 3426 bdev->required_alignment = spdk_u32log2(alignment); 3427 3428 iovcnt = 1; 3429 iovs[0].iov_base = NULL; 3430 iovs[0].iov_len = 0; 3431 3432 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3433 CU_ASSERT(rc == 0); 3434 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3435 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3436 alignment)); 3437 stub_complete_io(1); 3438 3439 /* Pass iov without allocated buffer with 1024 alignment required */ 3440 alignment = 1024; 3441 bdev->required_alignment = spdk_u32log2(alignment); 3442 3443 iovcnt = 1; 3444 iovs[0].iov_base = NULL; 3445 iovs[0].iov_len = 0; 3446 3447 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3448 CU_ASSERT(rc == 0); 3449 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3450 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3451 alignment)); 3452 stub_complete_io(1); 3453 3454 spdk_put_io_channel(io_ch); 3455 spdk_bdev_close(desc); 3456 free_bdev(bdev); 3457 fn_table.submit_request = stub_submit_request; 3458 ut_fini_bdev(); 3459 3460 free(buf); 3461 } 3462 3463 static void 3464 bdev_io_alignment_with_boundary(void) 3465 { 3466 struct spdk_bdev *bdev; 3467 struct spdk_bdev_desc *desc = NULL; 3468 struct spdk_io_channel *io_ch; 3469 struct spdk_bdev_opts bdev_opts = {}; 3470 int rc; 3471 void *buf = NULL; 3472 struct iovec iovs[2]; 3473 int iovcnt; 3474 uint64_t alignment; 3475 3476 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3477 bdev_opts.bdev_io_pool_size = 20; 3478 bdev_opts.bdev_io_cache_size = 2; 3479 bdev_opts.opts_size = sizeof(bdev_opts); 3480 ut_init_bdev(&bdev_opts); 3481 3482 fn_table.submit_request = stub_submit_request_get_buf; 3483 bdev = allocate_bdev("bdev0"); 3484 3485 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3486 CU_ASSERT(rc == 0); 3487 CU_ASSERT(desc != NULL); 3488 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3489 io_ch = spdk_bdev_get_io_channel(desc); 3490 CU_ASSERT(io_ch != NULL); 3491 3492 /* Create aligned buffer */ 3493 rc = posix_memalign(&buf, 4096, 131072); 3494 SPDK_CU_ASSERT_FATAL(rc == 0); 3495 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3496 3497 #ifdef NOTDEF 3498 /* 512 * 3 with 2 IO boundary, allocate small data buffer 
from bdev layer */ 3499 alignment = 512; 3500 bdev->required_alignment = spdk_u32log2(alignment); 3501 bdev->optimal_io_boundary = 2; 3502 bdev->split_on_optimal_io_boundary = true; 3503 3504 iovcnt = 1; 3505 iovs[0].iov_base = NULL; 3506 iovs[0].iov_len = 512 * 3; 3507 3508 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3509 CU_ASSERT(rc == 0); 3510 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3511 stub_complete_io(2); 3512 3513 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3514 alignment = 512; 3515 bdev->required_alignment = spdk_u32log2(alignment); 3516 bdev->optimal_io_boundary = 16; 3517 bdev->split_on_optimal_io_boundary = true; 3518 3519 iovcnt = 1; 3520 iovs[0].iov_base = NULL; 3521 iovs[0].iov_len = 512 * 16; 3522 3523 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3524 CU_ASSERT(rc == 0); 3525 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3526 stub_complete_io(2); 3527 3528 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3529 alignment = 512; 3530 bdev->required_alignment = spdk_u32log2(alignment); 3531 bdev->optimal_io_boundary = 128; 3532 bdev->split_on_optimal_io_boundary = true; 3533 3534 iovcnt = 1; 3535 iovs[0].iov_base = buf + 16; 3536 iovs[0].iov_len = 512 * 160; 3537 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3538 CU_ASSERT(rc == 0); 3539 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3540 stub_complete_io(2); 3541 3542 #endif 3543 3544 /* 512 * 3 with 2 IO boundary */ 3545 alignment = 512; 3546 bdev->required_alignment = spdk_u32log2(alignment); 3547 bdev->optimal_io_boundary = 2; 3548 bdev->split_on_optimal_io_boundary = true; 3549 3550 iovcnt = 2; 3551 iovs[0].iov_base = buf + 16; 3552 iovs[0].iov_len = 512; 3553 iovs[1].iov_base = buf + 16 + 512 + 32; 3554 iovs[1].iov_len = 1024; 3555 3556 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3557 CU_ASSERT(rc == 0); 3558 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3559 stub_complete_io(2); 3560 3561 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3562 CU_ASSERT(rc == 0); 3563 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3564 stub_complete_io(2); 3565 3566 /* 512 * 64 with 32 IO boundary */ 3567 bdev->optimal_io_boundary = 32; 3568 iovcnt = 2; 3569 iovs[0].iov_base = buf + 16; 3570 iovs[0].iov_len = 16384; 3571 iovs[1].iov_base = buf + 16 + 16384 + 32; 3572 iovs[1].iov_len = 16384; 3573 3574 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3575 CU_ASSERT(rc == 0); 3576 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3577 stub_complete_io(3); 3578 3579 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3580 CU_ASSERT(rc == 0); 3581 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3582 stub_complete_io(3); 3583 3584 /* 512 * 160 with 32 IO boundary */ 3585 iovcnt = 1; 3586 iovs[0].iov_base = buf + 16; 3587 iovs[0].iov_len = 16384 + 65536; 3588 3589 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3590 CU_ASSERT(rc == 0); 3591 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3592 stub_complete_io(6); 3593 3594 spdk_put_io_channel(io_ch); 3595 spdk_bdev_close(desc); 3596 free_bdev(bdev); 3597 fn_table.submit_request = stub_submit_request; 3598 ut_fini_bdev(); 3599 3600 free(buf); 3601 } 3602 3603 static void 3604 histogram_status_cb(void 
*cb_arg, int status) 3605 { 3606 g_status = status; 3607 } 3608 3609 static void 3610 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3611 { 3612 g_status = status; 3613 g_histogram = histogram; 3614 } 3615 3616 static void 3617 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3618 uint64_t total, uint64_t so_far) 3619 { 3620 g_count += count; 3621 } 3622 3623 static void 3624 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3625 { 3626 spdk_histogram_data_fn cb_fn = cb_arg; 3627 3628 g_status = status; 3629 3630 if (status == 0) { 3631 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3632 } 3633 } 3634 3635 static void 3636 bdev_histograms(void) 3637 { 3638 struct spdk_bdev *bdev; 3639 struct spdk_bdev_desc *desc = NULL; 3640 struct spdk_io_channel *ch; 3641 struct spdk_histogram_data *histogram; 3642 uint8_t buf[4096]; 3643 int rc; 3644 3645 ut_init_bdev(NULL); 3646 3647 bdev = allocate_bdev("bdev"); 3648 3649 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3650 CU_ASSERT(rc == 0); 3651 CU_ASSERT(desc != NULL); 3652 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3653 3654 ch = spdk_bdev_get_io_channel(desc); 3655 CU_ASSERT(ch != NULL); 3656 3657 /* Enable histogram */ 3658 g_status = -1; 3659 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3660 poll_threads(); 3661 CU_ASSERT(g_status == 0); 3662 CU_ASSERT(bdev->internal.histogram_enabled == true); 3663 3664 /* Allocate histogram */ 3665 histogram = spdk_histogram_data_alloc(); 3666 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3667 3668 /* Check if histogram is zeroed */ 3669 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3670 poll_threads(); 3671 CU_ASSERT(g_status == 0); 3672 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3673 3674 g_count = 0; 3675 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3676 3677 CU_ASSERT(g_count == 0); 3678 3679 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3680 CU_ASSERT(rc == 0); 3681 3682 spdk_delay_us(10); 3683 stub_complete_io(1); 3684 poll_threads(); 3685 3686 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3687 CU_ASSERT(rc == 0); 3688 3689 spdk_delay_us(10); 3690 stub_complete_io(1); 3691 poll_threads(); 3692 3693 /* Check if histogram gathered data from all I/O channels */ 3694 g_histogram = NULL; 3695 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3696 poll_threads(); 3697 CU_ASSERT(g_status == 0); 3698 CU_ASSERT(bdev->internal.histogram_enabled == true); 3699 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3700 3701 g_count = 0; 3702 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3703 CU_ASSERT(g_count == 2); 3704 3705 g_count = 0; 3706 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3707 CU_ASSERT(g_status == 0); 3708 CU_ASSERT(g_count == 2); 3709 3710 /* Disable histogram */ 3711 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3712 poll_threads(); 3713 CU_ASSERT(g_status == 0); 3714 CU_ASSERT(bdev->internal.histogram_enabled == false); 3715 3716 /* Try to run histogram commands on disabled bdev */ 3717 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3718 poll_threads(); 3719 CU_ASSERT(g_status == -EFAULT); 3720 3721 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3722 CU_ASSERT(g_status == -EFAULT); 3723 3724 spdk_histogram_data_free(histogram); 3725 
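/* Teardown: release the channel and descriptor before freeing the bdev. */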
spdk_put_io_channel(ch); 3726 spdk_bdev_close(desc); 3727 free_bdev(bdev); 3728 ut_fini_bdev(); 3729 } 3730 3731 static void 3732 _bdev_compare(bool emulated) 3733 { 3734 struct spdk_bdev *bdev; 3735 struct spdk_bdev_desc *desc = NULL; 3736 struct spdk_io_channel *ioch; 3737 struct ut_expected_io *expected_io; 3738 uint64_t offset, num_blocks; 3739 uint32_t num_completed; 3740 char aa_buf[512]; 3741 char bb_buf[512]; 3742 struct iovec compare_iov; 3743 uint8_t expected_io_type; 3744 int rc; 3745 3746 if (emulated) { 3747 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3748 } else { 3749 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3750 } 3751 3752 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3753 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3754 3755 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3756 3757 ut_init_bdev(NULL); 3758 fn_table.submit_request = stub_submit_request_get_buf; 3759 bdev = allocate_bdev("bdev"); 3760 3761 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3762 CU_ASSERT_EQUAL(rc, 0); 3763 SPDK_CU_ASSERT_FATAL(desc != NULL); 3764 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3765 ioch = spdk_bdev_get_io_channel(desc); 3766 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3767 3768 fn_table.submit_request = stub_submit_request_get_buf; 3769 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3770 3771 offset = 50; 3772 num_blocks = 1; 3773 compare_iov.iov_base = aa_buf; 3774 compare_iov.iov_len = sizeof(aa_buf); 3775 3776 /* 1. successful comparev */ 3777 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3778 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3779 3780 g_io_done = false; 3781 g_compare_read_buf = aa_buf; 3782 g_compare_read_buf_len = sizeof(aa_buf); 3783 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3784 CU_ASSERT_EQUAL(rc, 0); 3785 num_completed = stub_complete_io(1); 3786 CU_ASSERT_EQUAL(num_completed, 1); 3787 CU_ASSERT(g_io_done == true); 3788 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3789 3790 /* 2. miscompare comparev */ 3791 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3792 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3793 3794 g_io_done = false; 3795 g_compare_read_buf = bb_buf; 3796 g_compare_read_buf_len = sizeof(bb_buf); 3797 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3798 CU_ASSERT_EQUAL(rc, 0); 3799 num_completed = stub_complete_io(1); 3800 CU_ASSERT_EQUAL(num_completed, 1); 3801 CU_ASSERT(g_io_done == true); 3802 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3803 3804 /* 3. successful compare */ 3805 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3806 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3807 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3808 3809 g_io_done = false; 3810 g_compare_read_buf = aa_buf; 3811 g_compare_read_buf_len = sizeof(aa_buf); 3812 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3813 CU_ASSERT_EQUAL(rc, 0); 3814 num_completed = stub_complete_io(1); 3815 CU_ASSERT_EQUAL(num_completed, 1); 3816 CU_ASSERT(g_io_done == true); 3817 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3818 3819 /* 4. 
miscompare compare */ 3820 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3821 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3822 3823 g_io_done = false; 3824 g_compare_read_buf = bb_buf; 3825 g_compare_read_buf_len = sizeof(bb_buf); 3826 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3827 CU_ASSERT_EQUAL(rc, 0); 3828 num_completed = stub_complete_io(1); 3829 CU_ASSERT_EQUAL(num_completed, 1); 3830 CU_ASSERT(g_io_done == true); 3831 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3832 3833 spdk_put_io_channel(ioch); 3834 spdk_bdev_close(desc); 3835 free_bdev(bdev); 3836 fn_table.submit_request = stub_submit_request; 3837 ut_fini_bdev(); 3838 3839 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3840 3841 g_compare_read_buf = NULL; 3842 } 3843 3844 static void 3845 _bdev_compare_with_md(bool emulated) 3846 { 3847 struct spdk_bdev *bdev; 3848 struct spdk_bdev_desc *desc = NULL; 3849 struct spdk_io_channel *ioch; 3850 struct ut_expected_io *expected_io; 3851 uint64_t offset, num_blocks; 3852 uint32_t num_completed; 3853 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3854 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3855 char buf_miscompare[1024 /* 2 * blocklen */]; 3856 char md_buf[16]; 3857 char md_buf_miscompare[16]; 3858 struct iovec compare_iov; 3859 uint8_t expected_io_type; 3860 int rc; 3861 3862 if (emulated) { 3863 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3864 } else { 3865 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3866 } 3867 3868 memset(buf, 0xaa, sizeof(buf)); 3869 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3870 /* make last md different */ 3871 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3872 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3873 memset(md_buf, 0xaa, 16); 3874 memset(md_buf_miscompare, 0xbb, 16); 3875 3876 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3877 3878 ut_init_bdev(NULL); 3879 fn_table.submit_request = stub_submit_request_get_buf; 3880 bdev = allocate_bdev("bdev"); 3881 3882 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3883 CU_ASSERT_EQUAL(rc, 0); 3884 SPDK_CU_ASSERT_FATAL(desc != NULL); 3885 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3886 ioch = spdk_bdev_get_io_channel(desc); 3887 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3888 3889 fn_table.submit_request = stub_submit_request_get_buf; 3890 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3891 3892 offset = 50; 3893 num_blocks = 2; 3894 3895 /* interleaved md & data */ 3896 bdev->md_interleave = true; 3897 bdev->md_len = 8; 3898 bdev->blocklen = 512 + 8; 3899 compare_iov.iov_base = buf; 3900 compare_iov.iov_len = sizeof(buf); 3901 3902 /* 1. successful compare with md interleaved */ 3903 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3904 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3905 3906 g_io_done = false; 3907 g_compare_read_buf = buf; 3908 g_compare_read_buf_len = sizeof(buf); 3909 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3910 CU_ASSERT_EQUAL(rc, 0); 3911 num_completed = stub_complete_io(1); 3912 CU_ASSERT_EQUAL(num_completed, 1); 3913 CU_ASSERT(g_io_done == true); 3914 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3915 3916 /* 2. 
miscompare with md interleaved */ 3917 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3918 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3919 3920 g_io_done = false; 3921 g_compare_read_buf = buf_interleaved_miscompare; 3922 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3923 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3924 CU_ASSERT_EQUAL(rc, 0); 3925 num_completed = stub_complete_io(1); 3926 CU_ASSERT_EQUAL(num_completed, 1); 3927 CU_ASSERT(g_io_done == true); 3928 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3929 3930 /* Separate data & md buffers */ 3931 bdev->md_interleave = false; 3932 bdev->blocklen = 512; 3933 compare_iov.iov_base = buf; 3934 compare_iov.iov_len = 1024; 3935 3936 /* 3. successful compare with md separated */ 3937 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3938 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3939 3940 g_io_done = false; 3941 g_compare_read_buf = buf; 3942 g_compare_read_buf_len = 1024; 3943 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3944 g_compare_md_buf = md_buf; 3945 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3946 offset, num_blocks, io_done, NULL); 3947 CU_ASSERT_EQUAL(rc, 0); 3948 num_completed = stub_complete_io(1); 3949 CU_ASSERT_EQUAL(num_completed, 1); 3950 CU_ASSERT(g_io_done == true); 3951 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3952 3953 /* 4. miscompare with md separated where md buf is different */ 3954 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3955 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3956 3957 g_io_done = false; 3958 g_compare_read_buf = buf; 3959 g_compare_read_buf_len = 1024; 3960 g_compare_md_buf = md_buf_miscompare; 3961 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3962 offset, num_blocks, io_done, NULL); 3963 CU_ASSERT_EQUAL(rc, 0); 3964 num_completed = stub_complete_io(1); 3965 CU_ASSERT_EQUAL(num_completed, 1); 3966 CU_ASSERT(g_io_done == true); 3967 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3968 3969 /* 5. 
miscompare with md separated where buf is different */ 3970 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3971 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3972 3973 g_io_done = false; 3974 g_compare_read_buf = buf_miscompare; 3975 g_compare_read_buf_len = sizeof(buf_miscompare); 3976 g_compare_md_buf = md_buf; 3977 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3978 offset, num_blocks, io_done, NULL); 3979 CU_ASSERT_EQUAL(rc, 0); 3980 num_completed = stub_complete_io(1); 3981 CU_ASSERT_EQUAL(num_completed, 1); 3982 CU_ASSERT(g_io_done == true); 3983 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3984 3985 bdev->md_len = 0; 3986 g_compare_md_buf = NULL; 3987 3988 spdk_put_io_channel(ioch); 3989 spdk_bdev_close(desc); 3990 free_bdev(bdev); 3991 fn_table.submit_request = stub_submit_request; 3992 ut_fini_bdev(); 3993 3994 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3995 3996 g_compare_read_buf = NULL; 3997 } 3998 3999 static void 4000 bdev_compare(void) 4001 { 4002 _bdev_compare(false); 4003 _bdev_compare_with_md(false); 4004 } 4005 4006 static void 4007 bdev_compare_emulated(void) 4008 { 4009 _bdev_compare(true); 4010 _bdev_compare_with_md(true); 4011 } 4012 4013 static void 4014 bdev_compare_and_write(void) 4015 { 4016 struct spdk_bdev *bdev; 4017 struct spdk_bdev_desc *desc = NULL; 4018 struct spdk_io_channel *ioch; 4019 struct ut_expected_io *expected_io; 4020 uint64_t offset, num_blocks; 4021 uint32_t num_completed; 4022 char aa_buf[512]; 4023 char bb_buf[512]; 4024 char cc_buf[512]; 4025 char write_buf[512]; 4026 struct iovec compare_iov; 4027 struct iovec write_iov; 4028 int rc; 4029 4030 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4031 memset(bb_buf, 0xbb, sizeof(bb_buf)); 4032 memset(cc_buf, 0xcc, sizeof(cc_buf)); 4033 4034 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 4035 4036 ut_init_bdev(NULL); 4037 fn_table.submit_request = stub_submit_request_get_buf; 4038 bdev = allocate_bdev("bdev"); 4039 4040 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4041 CU_ASSERT_EQUAL(rc, 0); 4042 SPDK_CU_ASSERT_FATAL(desc != NULL); 4043 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4044 ioch = spdk_bdev_get_io_channel(desc); 4045 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4046 4047 fn_table.submit_request = stub_submit_request_get_buf; 4048 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4049 4050 offset = 50; 4051 num_blocks = 1; 4052 compare_iov.iov_base = aa_buf; 4053 compare_iov.iov_len = sizeof(aa_buf); 4054 write_iov.iov_base = bb_buf; 4055 write_iov.iov_len = sizeof(bb_buf); 4056 4057 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4058 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4059 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 4060 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4061 4062 g_io_done = false; 4063 g_compare_read_buf = aa_buf; 4064 g_compare_read_buf_len = sizeof(aa_buf); 4065 memset(write_buf, 0, sizeof(write_buf)); 4066 g_compare_write_buf = write_buf; 4067 g_compare_write_buf_len = sizeof(write_buf); 4068 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4069 offset, num_blocks, io_done, NULL); 4070 /* Trigger range locking */ 4071 poll_threads(); 4072 CU_ASSERT_EQUAL(rc, 0); 4073 num_completed = stub_complete_io(1); 4074 CU_ASSERT_EQUAL(num_completed, 1); 4075 CU_ASSERT(g_io_done == 
false); 4076 num_completed = stub_complete_io(1); 4077 /* Trigger range unlocking */ 4078 poll_threads(); 4079 CU_ASSERT_EQUAL(num_completed, 1); 4080 CU_ASSERT(g_io_done == true); 4081 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4082 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 4083 4084 /* Test miscompare */ 4085 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4086 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4087 4088 g_io_done = false; 4089 g_compare_read_buf = cc_buf; 4090 g_compare_read_buf_len = sizeof(cc_buf); 4091 memset(write_buf, 0, sizeof(write_buf)); 4092 g_compare_write_buf = write_buf; 4093 g_compare_write_buf_len = sizeof(write_buf); 4094 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4095 offset, num_blocks, io_done, NULL); 4096 /* Trigger range locking */ 4097 poll_threads(); 4098 CU_ASSERT_EQUAL(rc, 0); 4099 num_completed = stub_complete_io(1); 4100 /* Trigger range unlocking earlier because we expect error here */ 4101 poll_threads(); 4102 CU_ASSERT_EQUAL(num_completed, 1); 4103 CU_ASSERT(g_io_done == true); 4104 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 4105 num_completed = stub_complete_io(1); 4106 CU_ASSERT_EQUAL(num_completed, 0); 4107 4108 spdk_put_io_channel(ioch); 4109 spdk_bdev_close(desc); 4110 free_bdev(bdev); 4111 fn_table.submit_request = stub_submit_request; 4112 ut_fini_bdev(); 4113 4114 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 4115 4116 g_compare_read_buf = NULL; 4117 g_compare_write_buf = NULL; 4118 } 4119 4120 static void 4121 bdev_write_zeroes(void) 4122 { 4123 struct spdk_bdev *bdev; 4124 struct spdk_bdev_desc *desc = NULL; 4125 struct spdk_io_channel *ioch; 4126 struct ut_expected_io *expected_io; 4127 uint64_t offset, num_io_blocks, num_blocks; 4128 uint32_t num_completed, num_requests; 4129 int rc; 4130 4131 ut_init_bdev(NULL); 4132 bdev = allocate_bdev("bdev"); 4133 4134 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4135 CU_ASSERT_EQUAL(rc, 0); 4136 SPDK_CU_ASSERT_FATAL(desc != NULL); 4137 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4138 ioch = spdk_bdev_get_io_channel(desc); 4139 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4140 4141 fn_table.submit_request = stub_submit_request; 4142 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4143 4144 /* First test that if the bdev supports write_zeroes, the request won't be split */ 4145 bdev->md_len = 0; 4146 bdev->blocklen = 4096; 4147 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4148 4149 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4150 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4151 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4152 CU_ASSERT_EQUAL(rc, 0); 4153 num_completed = stub_complete_io(1); 4154 CU_ASSERT_EQUAL(num_completed, 1); 4155 4156 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 4157 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 4158 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4159 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 4160 num_requests = 2; 4161 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 4162 4163 for (offset = 0; offset < num_requests; ++offset) { 4164 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4165 offset * num_io_blocks, num_io_blocks, 0); 4166 
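/* Each fallback write is expected to cover one full ZERO_BUFFER_SIZE chunk (num_io_blocks blocks) backed by the shared zero buffer. */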
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4167 } 4168 4169 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4170 CU_ASSERT_EQUAL(rc, 0); 4171 num_completed = stub_complete_io(num_requests); 4172 CU_ASSERT_EQUAL(num_completed, num_requests); 4173 4174 /* Check that the splitting is correct if bdev has interleaved metadata */ 4175 bdev->md_interleave = true; 4176 bdev->md_len = 64; 4177 bdev->blocklen = 4096 + 64; 4178 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4179 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4180 4181 num_requests = offset = 0; 4182 while (offset < num_blocks) { 4183 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 4184 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4185 offset, num_io_blocks, 0); 4186 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4187 offset += num_io_blocks; 4188 num_requests++; 4189 } 4190 4191 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4192 CU_ASSERT_EQUAL(rc, 0); 4193 num_completed = stub_complete_io(num_requests); 4194 CU_ASSERT_EQUAL(num_completed, num_requests); 4195 num_completed = stub_complete_io(num_requests); 4196 CU_ASSERT_EQUAL(num_completed, 0); 4197 4198 /* Check the same for a separate metadata buffer */ 4199 bdev->md_interleave = false; 4200 bdev->md_len = 64; 4201 bdev->blocklen = 4096; 4202 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4203 4204 num_requests = offset = 0; 4205 while (offset < num_blocks) { 4206 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 4207 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4208 offset, num_io_blocks, 0); 4209 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 4210 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4211 offset += num_io_blocks; 4212 num_requests++; 4213 } 4214 4215 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4216 CU_ASSERT_EQUAL(rc, 0); 4217 num_completed = stub_complete_io(num_requests); 4218 CU_ASSERT_EQUAL(num_completed, num_requests); 4219 4220 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 4221 spdk_put_io_channel(ioch); 4222 spdk_bdev_close(desc); 4223 free_bdev(bdev); 4224 ut_fini_bdev(); 4225 } 4226 4227 static void 4228 bdev_zcopy_write(void) 4229 { 4230 struct spdk_bdev *bdev; 4231 struct spdk_bdev_desc *desc = NULL; 4232 struct spdk_io_channel *ioch; 4233 struct ut_expected_io *expected_io; 4234 uint64_t offset, num_blocks; 4235 uint32_t num_completed; 4236 char aa_buf[512]; 4237 struct iovec iov; 4238 int rc; 4239 const bool populate = false; 4240 const bool commit = true; 4241 4242 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4243 4244 ut_init_bdev(NULL); 4245 bdev = allocate_bdev("bdev"); 4246 4247 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4248 CU_ASSERT_EQUAL(rc, 0); 4249 SPDK_CU_ASSERT_FATAL(desc != NULL); 4250 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4251 ioch = spdk_bdev_get_io_channel(desc); 4252 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4253 4254 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4255 4256 offset = 50; 4257 num_blocks = 1; 4258 iov.iov_base = NULL; 4259 iov.iov_len = 0; 4260 4261 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 4262 g_zcopy_read_buf_len = (uint32_t) -1; 4263 /* Do a zcopy start for a write (populate=false) */ 4264 expected_io =
ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4265 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4266 g_io_done = false; 4267 g_zcopy_write_buf = aa_buf; 4268 g_zcopy_write_buf_len = sizeof(aa_buf); 4269 g_zcopy_bdev_io = NULL; 4270 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4271 CU_ASSERT_EQUAL(rc, 0); 4272 num_completed = stub_complete_io(1); 4273 CU_ASSERT_EQUAL(num_completed, 1); 4274 CU_ASSERT(g_io_done == true); 4275 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4276 /* Check that the iov has been set up */ 4277 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 4278 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 4279 /* Check that the bdev_io has been saved */ 4280 CU_ASSERT(g_zcopy_bdev_io != NULL); 4281 /* Now do the zcopy end for a write (commit=true) */ 4282 g_io_done = false; 4283 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4284 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4285 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4286 CU_ASSERT_EQUAL(rc, 0); 4287 num_completed = stub_complete_io(1); 4288 CU_ASSERT_EQUAL(num_completed, 1); 4289 CU_ASSERT(g_io_done == true); 4290 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4291 /* Check that the g_zcopy fields are reset by io_done */ 4292 CU_ASSERT(g_zcopy_write_buf == NULL); 4293 CU_ASSERT(g_zcopy_write_buf_len == 0); 4294 /* Check that io_done has freed the g_zcopy_bdev_io */ 4295 CU_ASSERT(g_zcopy_bdev_io == NULL); 4296 4297 /* Check that the zcopy read buffer has not been touched, which 4298 * ensures that the correct buffers were used. 4299 */ 4300 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4301 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4302 4303 spdk_put_io_channel(ioch); 4304 spdk_bdev_close(desc); 4305 free_bdev(bdev); 4306 ut_fini_bdev(); 4307 } 4308 4309 static void 4310 bdev_zcopy_read(void) 4311 { 4312 struct spdk_bdev *bdev; 4313 struct spdk_bdev_desc *desc = NULL; 4314 struct spdk_io_channel *ioch; 4315 struct ut_expected_io *expected_io; 4316 uint64_t offset, num_blocks; 4317 uint32_t num_completed; 4318 char aa_buf[512]; 4319 struct iovec iov; 4320 int rc; 4321 const bool populate = true; 4322 const bool commit = false; 4323 4324 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4325 4326 ut_init_bdev(NULL); 4327 bdev = allocate_bdev("bdev"); 4328 4329 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4330 CU_ASSERT_EQUAL(rc, 0); 4331 SPDK_CU_ASSERT_FATAL(desc != NULL); 4332 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4333 ioch = spdk_bdev_get_io_channel(desc); 4334 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4335 4336 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4337 4338 offset = 50; 4339 num_blocks = 1; 4340 iov.iov_base = NULL; 4341 iov.iov_len = 0; 4342 4343 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4344 g_zcopy_write_buf_len = (uint32_t) -1; 4345 4346 /* Do a zcopy start for a read (populate=true) */ 4347 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4348 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4349 g_io_done = false; 4350 g_zcopy_read_buf = aa_buf; 4351 g_zcopy_read_buf_len = sizeof(aa_buf); 4352 g_zcopy_bdev_io = NULL; 4353 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4354 CU_ASSERT_EQUAL(rc, 0); 4355 num_completed = stub_complete_io(1); 4356 CU_ASSERT_EQUAL(num_completed,
1); 4357 CU_ASSERT(g_io_done == true); 4358 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4359 /* Check that the iov has been set up */ 4360 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4361 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4362 /* Check that the bdev_io has been saved */ 4363 CU_ASSERT(g_zcopy_bdev_io != NULL); 4364 4365 /* Now do the zcopy end for a read (commit=false) */ 4366 g_io_done = false; 4367 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4368 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4369 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4370 CU_ASSERT_EQUAL(rc, 0); 4371 num_completed = stub_complete_io(1); 4372 CU_ASSERT_EQUAL(num_completed, 1); 4373 CU_ASSERT(g_io_done == true); 4374 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4375 /* Check that the g_zcopy fields are reset by io_done */ 4376 CU_ASSERT(g_zcopy_read_buf == NULL); 4377 CU_ASSERT(g_zcopy_read_buf_len == 0); 4378 /* Check that io_done has freed the g_zcopy_bdev_io */ 4379 CU_ASSERT(g_zcopy_bdev_io == NULL); 4380 4381 /* Check that the zcopy write buffer has not been touched, which 4382 * ensures that the correct buffers were used. 4383 */ 4384 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4385 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4386 4387 spdk_put_io_channel(ioch); 4388 spdk_bdev_close(desc); 4389 free_bdev(bdev); 4390 ut_fini_bdev(); 4391 } 4392 4393 static void 4394 bdev_open_while_hotremove(void) 4395 { 4396 struct spdk_bdev *bdev; 4397 struct spdk_bdev_desc *desc[2] = {}; 4398 int rc; 4399 4400 bdev = allocate_bdev("bdev"); 4401 4402 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4403 CU_ASSERT(rc == 0); 4404 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4405 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4406 4407 spdk_bdev_unregister(bdev, NULL, NULL); 4408 /* Bdev unregister is handled asynchronously. Poll thread to complete.
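* Once the unregister has started, the bdev is being removed, so the open below fails with -ENODEV even though desc[0] is still open.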
*/ 4409 poll_threads(); 4410 4411 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4412 CU_ASSERT(rc == -ENODEV); 4413 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4414 4415 spdk_bdev_close(desc[0]); 4416 free_bdev(bdev); 4417 } 4418 4419 static void 4420 bdev_close_while_hotremove(void) 4421 { 4422 struct spdk_bdev *bdev; 4423 struct spdk_bdev_desc *desc = NULL; 4424 int rc = 0; 4425 4426 bdev = allocate_bdev("bdev"); 4427 4428 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4429 CU_ASSERT_EQUAL(rc, 0); 4430 SPDK_CU_ASSERT_FATAL(desc != NULL); 4431 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4432 4433 /* Simulate hot-unplug by unregistering bdev */ 4434 g_event_type1 = 0xFF; 4435 g_unregister_arg = NULL; 4436 g_unregister_rc = -1; 4437 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4438 /* Close device while remove event is in flight */ 4439 spdk_bdev_close(desc); 4440 4441 /* Ensure that unregister callback is delayed */ 4442 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4443 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4444 4445 poll_threads(); 4446 4447 /* Event callback shall not be issued because device was closed */ 4448 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4449 /* Unregister callback is issued */ 4450 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4451 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4452 4453 free_bdev(bdev); 4454 } 4455 4456 static void 4457 bdev_open_ext_test(void) 4458 { 4459 struct spdk_bdev *bdev; 4460 struct spdk_bdev_desc *desc1 = NULL; 4461 struct spdk_bdev_desc *desc2 = NULL; 4462 int rc = 0; 4463 4464 bdev = allocate_bdev("bdev"); 4465 4466 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4467 CU_ASSERT_EQUAL(rc, -EINVAL); 4468 4469 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4470 CU_ASSERT_EQUAL(rc, 0); 4471 4472 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4473 CU_ASSERT_EQUAL(rc, 0); 4474 4475 g_event_type1 = 0xFF; 4476 g_event_type2 = 0xFF; 4477 4478 /* Simulate hot-unplug by unregistering bdev */ 4479 spdk_bdev_unregister(bdev, NULL, NULL); 4480 poll_threads(); 4481 4482 /* Check if correct events have been triggered in event callback fn */ 4483 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4484 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4485 4486 free_bdev(bdev); 4487 poll_threads(); 4488 } 4489 4490 static void 4491 bdev_open_ext_unregister(void) 4492 { 4493 struct spdk_bdev *bdev; 4494 struct spdk_bdev_desc *desc1 = NULL; 4495 struct spdk_bdev_desc *desc2 = NULL; 4496 struct spdk_bdev_desc *desc3 = NULL; 4497 struct spdk_bdev_desc *desc4 = NULL; 4498 int rc = 0; 4499 4500 bdev = allocate_bdev("bdev"); 4501 4502 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4503 CU_ASSERT_EQUAL(rc, -EINVAL); 4504 4505 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4506 CU_ASSERT_EQUAL(rc, 0); 4507 4508 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4509 CU_ASSERT_EQUAL(rc, 0); 4510 4511 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4512 CU_ASSERT_EQUAL(rc, 0); 4513 4514 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4515 CU_ASSERT_EQUAL(rc, 0); 4516 4517 g_event_type1 = 0xFF; 4518 g_event_type2 = 0xFF; 4519 g_event_type3 = 0xFF; 4520 g_event_type4 = 0xFF; 4521 4522 g_unregister_arg = NULL; 4523 g_unregister_rc = -1; 4524 4525 /* Simulate hot-unplug by unregistering bdev */ 4526 spdk_bdev_unregister(bdev, bdev_unregister_cb, 
(void *)0x12345678); 4527 4528 /* 4529 * Unregister is handled asynchronously and the event callbacks 4530 * (i.e., the bdev_open_cbN above) will be called. 4531 * bdev_open_cb3 and bdev_open_cb4 intentionally do not close 4532 * desc3 and desc4, so the bdev stays open. 4533 */ 4534 poll_threads(); 4535 4536 /* Check if correct events have been triggered in event callback fn */ 4537 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4538 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4539 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4540 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4541 4542 /* Check that unregister callback is delayed */ 4543 CU_ASSERT(g_unregister_arg == NULL); 4544 CU_ASSERT(g_unregister_rc == -1); 4545 4546 /* 4547 * Explicitly close desc3. As desc4 is still open, the 4548 * unregister callback remains delayed. 4549 */ 4550 spdk_bdev_close(desc3); 4551 CU_ASSERT(g_unregister_arg == NULL); 4552 CU_ASSERT(g_unregister_rc == -1); 4553 4554 /* 4555 * Explicitly close desc4 to trigger the ongoing bdev unregister 4556 * operation after the last desc is closed. 4557 */ 4558 spdk_bdev_close(desc4); 4559 4560 /* Poll the thread for the async unregister operation */ 4561 poll_threads(); 4562 4563 /* Check that unregister callback is executed */ 4564 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4565 CU_ASSERT(g_unregister_rc == 0); 4566 4567 free_bdev(bdev); 4568 poll_threads(); 4569 } 4570 4571 struct timeout_io_cb_arg { 4572 struct iovec iov; 4573 uint8_t type; 4574 }; 4575 4576 static int 4577 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4578 { 4579 struct spdk_bdev_io *bdev_io; 4580 int n = 0; 4581 4582 if (!ch) { 4583 return -1; 4584 } 4585 4586 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4587 n++; 4588 } 4589 4590 return n; 4591 } 4592 4593 static void 4594 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4595 { 4596 struct timeout_io_cb_arg *ctx = cb_arg; 4597 4598 ctx->type = bdev_io->type; 4599 ctx->iov.iov_base = bdev_io->iov.iov_base; 4600 ctx->iov.iov_len = bdev_io->iov.iov_len; 4601 } 4602 4603 static void 4604 bdev_set_io_timeout(void) 4605 { 4606 struct spdk_bdev *bdev; 4607 struct spdk_bdev_desc *desc = NULL; 4608 struct spdk_io_channel *io_ch = NULL; 4609 struct spdk_bdev_channel *bdev_ch = NULL; 4610 struct timeout_io_cb_arg cb_arg; 4611 4612 ut_init_bdev(NULL); 4613 bdev = allocate_bdev("bdev"); 4614 4615 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4616 SPDK_CU_ASSERT_FATAL(desc != NULL); 4617 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4618 4619 io_ch = spdk_bdev_get_io_channel(desc); 4620 CU_ASSERT(io_ch != NULL); 4621 4622 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4623 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4624 4625 /* This is part 1:
4626 * verify that the bdev_ch->io_submitted list links the user-submitted IOs 4627 * and only those. 4628 */ 4629 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4630 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4631 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4632 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4633 stub_complete_io(1); 4634 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4635 stub_complete_io(1); 4636 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4637 4638 /* Split IO */ 4639 bdev->optimal_io_boundary = 16; 4640 bdev->split_on_optimal_io_boundary = true; 4641 4642 /* Now test that a single-vector command is split correctly. 4643 * Offset 14, length 8, payload 0xF000 4644 * Child - Offset 14, length 2, payload 0xF000 4645 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4646 * 4647 * No expected values are registered here - this test only counts the submitted IOs. 4648 */ 4649 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4650 /* We count all submitted IOs including IO that are generated by splitting. */ 4651 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4652 stub_complete_io(1); 4653 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4654 stub_complete_io(1); 4655 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4656 4657 /* Also include the reset IO */ 4658 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4659 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4660 poll_threads(); 4661 stub_complete_io(1); 4662 poll_threads(); 4663 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4664 4665 /* This is part 2: 4666 * test registration of the descriptor's timeout poller 4667 */ 4668 4669 /* Successfully set the timeout */ 4670 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4671 CU_ASSERT(desc->io_timeout_poller != NULL); 4672 CU_ASSERT(desc->timeout_in_sec == 30); 4673 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4674 CU_ASSERT(desc->cb_arg == &cb_arg); 4675 4676 /* Change the timeout limit */ 4677 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4678 CU_ASSERT(desc->io_timeout_poller != NULL); 4679 CU_ASSERT(desc->timeout_in_sec == 20); 4680 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4681 CU_ASSERT(desc->cb_arg == &cb_arg); 4682 4683 /* Disable the timeout */ 4684 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4685 CU_ASSERT(desc->io_timeout_poller == NULL); 4686 4687 /* This is part 3: 4688 * catch a timed-out IO and check that it is 4689 * the submitted one.
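* The test advances the mocked clock in two 15 second steps: after the first step the 30 second limit is not reached, after the second the write is reported as timed out.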
4690 */ 4691 memset(&cb_arg, 0, sizeof(cb_arg)); 4692 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4693 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4694 4695 /* Don't reach the limit */ 4696 spdk_delay_us(15 * spdk_get_ticks_hz()); 4697 poll_threads(); 4698 CU_ASSERT(cb_arg.type == 0); 4699 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4700 CU_ASSERT(cb_arg.iov.iov_len == 0); 4701 4702 /* 15 + 15 = 30: reaches the limit */ 4703 spdk_delay_us(15 * spdk_get_ticks_hz()); 4704 poll_threads(); 4705 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4706 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4707 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4708 stub_complete_io(1); 4709 4710 /* Use the same split I/O as above and check the I/O */ 4711 memset(&cb_arg, 0, sizeof(cb_arg)); 4712 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4713 4714 /* The first child completes in time */ 4715 spdk_delay_us(15 * spdk_get_ticks_hz()); 4716 poll_threads(); 4717 stub_complete_io(1); 4718 CU_ASSERT(cb_arg.type == 0); 4719 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4720 CU_ASSERT(cb_arg.iov.iov_len == 0); 4721 4722 /* The second child reaches the limit */ 4723 spdk_delay_us(15 * spdk_get_ticks_hz()); 4724 poll_threads(); 4725 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4726 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4727 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4728 stub_complete_io(1); 4729 4730 /* Also include the reset IO */ 4731 memset(&cb_arg, 0, sizeof(cb_arg)); 4732 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4733 spdk_delay_us(30 * spdk_get_ticks_hz()); 4734 poll_threads(); 4735 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4736 stub_complete_io(1); 4737 poll_threads(); 4738 4739 spdk_put_io_channel(io_ch); 4740 spdk_bdev_close(desc); 4741 free_bdev(bdev); 4742 ut_fini_bdev(); 4743 } 4744 4745 static void 4746 bdev_set_qd_sampling(void) 4747 { 4748 struct spdk_bdev *bdev; 4749 struct spdk_bdev_desc *desc = NULL; 4750 struct spdk_io_channel *io_ch = NULL; 4751 struct spdk_bdev_channel *bdev_ch = NULL; 4752 struct timeout_io_cb_arg cb_arg; 4753 4754 ut_init_bdev(NULL); 4755 bdev = allocate_bdev("bdev"); 4756 4757 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4758 SPDK_CU_ASSERT_FATAL(desc != NULL); 4759 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4760 4761 io_ch = spdk_bdev_get_io_channel(desc); 4762 CU_ASSERT(io_ch != NULL); 4763 4764 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4765 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4766 4767 /* This is part 1. 4768 * We will check the bdev_ch->io_submitted list 4769 * to make sure that it links only the user-submitted I/Os 4770 */ 4771 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4772 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4773 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4774 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4775 stub_complete_io(1); 4776 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4777 stub_complete_io(1); 4778 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4779 4780 /* This is part 2.
4781 * Test the bdev's QD sampling poller registration 4782 */ 4783 /* 1st Successfully set the qd sampling period */ 4784 spdk_bdev_set_qd_sampling_period(bdev, 10); 4785 CU_ASSERT(bdev->internal.new_period == 10); 4786 CU_ASSERT(bdev->internal.period == 10); 4787 CU_ASSERT(bdev->internal.qd_desc != NULL); 4788 poll_threads(); 4789 CU_ASSERT(bdev->internal.qd_poller != NULL); 4790 4791 /* 2nd Change the qd sampling period */ 4792 spdk_bdev_set_qd_sampling_period(bdev, 20); 4793 CU_ASSERT(bdev->internal.new_period == 20); 4794 CU_ASSERT(bdev->internal.period == 10); 4795 CU_ASSERT(bdev->internal.qd_desc != NULL); 4796 poll_threads(); 4797 CU_ASSERT(bdev->internal.qd_poller != NULL); 4798 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4799 4800 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4801 spdk_delay_us(20); 4802 poll_thread_times(0, 1); 4803 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4804 spdk_bdev_set_qd_sampling_period(bdev, 30); 4805 CU_ASSERT(bdev->internal.new_period == 30); 4806 CU_ASSERT(bdev->internal.period == 20); 4807 poll_threads(); 4808 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4809 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4810 4811 /* 4th Disable the qd sampling period */ 4812 spdk_bdev_set_qd_sampling_period(bdev, 0); 4813 CU_ASSERT(bdev->internal.new_period == 0); 4814 CU_ASSERT(bdev->internal.period == 30); 4815 poll_threads(); 4816 CU_ASSERT(bdev->internal.qd_poller == NULL); 4817 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4818 CU_ASSERT(bdev->internal.qd_desc == NULL); 4819 4820 /* This is part 3. 4821 * We will test that submitted I/Os and resets work 4822 * properly with QD sampling. 4823 */ 4824 memset(&cb_arg, 0, sizeof(cb_arg)); 4825 spdk_bdev_set_qd_sampling_period(bdev, 1); 4826 poll_threads(); 4827 4828 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4829 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4830 4831 /* Also include the reset IO */ 4832 memset(&cb_arg, 0, sizeof(cb_arg)); 4833 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4834 poll_threads(); 4835 4836 /* Close the desc */ 4837 spdk_put_io_channel(io_ch); 4838 spdk_bdev_close(desc); 4839 4840 /* Complete the submitted IO and reset */ 4841 stub_complete_io(2); 4842 poll_threads(); 4843 4844 free_bdev(bdev); 4845 ut_fini_bdev(); 4846 } 4847 4848 static void 4849 lba_range_overlap(void) 4850 { 4851 struct lba_range r1, r2; 4852 4853 r1.offset = 100; 4854 r1.length = 50; 4855 4856 r2.offset = 0; 4857 r2.length = 1; 4858 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4859 4860 r2.offset = 0; 4861 r2.length = 100; 4862 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4863 4864 r2.offset = 0; 4865 r2.length = 110; 4866 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4867 4868 r2.offset = 100; 4869 r2.length = 10; 4870 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4871 4872 r2.offset = 110; 4873 r2.length = 20; 4874 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4875 4876 r2.offset = 140; 4877 r2.length = 150; 4878 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4879 4880 r2.offset = 130; 4881 r2.length = 200; 4882 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4883 4884 r2.offset = 150; 4885 r2.length = 100; 4886 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4887 4888 r2.offset = 110; 4889 r2.length = 0; 4890 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4891 } 4892 4893 static bool g_lock_lba_range_done; 4894 static bool
g_unlock_lba_range_done; 4895 4896 static void 4897 lock_lba_range_done(struct lba_range *range, void *ctx, int status) 4898 { 4899 g_lock_lba_range_done = true; 4900 } 4901 4902 static void 4903 unlock_lba_range_done(struct lba_range *range, void *ctx, int status) 4904 { 4905 g_unlock_lba_range_done = true; 4906 } 4907 4908 static void 4909 lock_lba_range_check_ranges(void) 4910 { 4911 struct spdk_bdev *bdev; 4912 struct spdk_bdev_desc *desc = NULL; 4913 struct spdk_io_channel *io_ch; 4914 struct spdk_bdev_channel *channel; 4915 struct lba_range *range; 4916 int ctx1; 4917 int rc; 4918 4919 ut_init_bdev(NULL); 4920 bdev = allocate_bdev("bdev0"); 4921 4922 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4923 CU_ASSERT(rc == 0); 4924 CU_ASSERT(desc != NULL); 4925 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4926 io_ch = spdk_bdev_get_io_channel(desc); 4927 CU_ASSERT(io_ch != NULL); 4928 channel = spdk_io_channel_get_ctx(io_ch); 4929 4930 g_lock_lba_range_done = false; 4931 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4932 CU_ASSERT(rc == 0); 4933 poll_threads(); 4934 4935 CU_ASSERT(g_lock_lba_range_done == true); 4936 range = TAILQ_FIRST(&channel->locked_ranges); 4937 SPDK_CU_ASSERT_FATAL(range != NULL); 4938 CU_ASSERT(range->offset == 20); 4939 CU_ASSERT(range->length == 10); 4940 CU_ASSERT(range->owner_ch == channel); 4941 4942 /* Unlocks must exactly match a lock. */ 4943 g_unlock_lba_range_done = false; 4944 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4945 CU_ASSERT(rc == -EINVAL); 4946 CU_ASSERT(g_unlock_lba_range_done == false); 4947 4948 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4949 CU_ASSERT(rc == 0); 4950 spdk_delay_us(100); 4951 poll_threads(); 4952 4953 CU_ASSERT(g_unlock_lba_range_done == true); 4954 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4955 4956 spdk_put_io_channel(io_ch); 4957 spdk_bdev_close(desc); 4958 free_bdev(bdev); 4959 ut_fini_bdev(); 4960 } 4961 4962 static void 4963 lock_lba_range_with_io_outstanding(void) 4964 { 4965 struct spdk_bdev *bdev; 4966 struct spdk_bdev_desc *desc = NULL; 4967 struct spdk_io_channel *io_ch; 4968 struct spdk_bdev_channel *channel; 4969 struct lba_range *range; 4970 char buf[4096]; 4971 int ctx1; 4972 int rc; 4973 4974 ut_init_bdev(NULL); 4975 bdev = allocate_bdev("bdev0"); 4976 4977 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4978 CU_ASSERT(rc == 0); 4979 CU_ASSERT(desc != NULL); 4980 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4981 io_ch = spdk_bdev_get_io_channel(desc); 4982 CU_ASSERT(io_ch != NULL); 4983 channel = spdk_io_channel_get_ctx(io_ch); 4984 4985 g_io_done = false; 4986 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4987 CU_ASSERT(rc == 0); 4988 4989 g_lock_lba_range_done = false; 4990 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4991 CU_ASSERT(rc == 0); 4992 poll_threads(); 4993 4994 /* The lock should immediately become valid, since there are no outstanding 4995 * write I/O. 
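 * (Reads do not count: the read submitted above is still outstanding, yet the
 * lock is granted immediately.)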
4996 */ 4997 CU_ASSERT(g_io_done == false); 4998 CU_ASSERT(g_lock_lba_range_done == true); 4999 range = TAILQ_FIRST(&channel->locked_ranges); 5000 SPDK_CU_ASSERT_FATAL(range != NULL); 5001 CU_ASSERT(range->offset == 20); 5002 CU_ASSERT(range->length == 10); 5003 CU_ASSERT(range->owner_ch == channel); 5004 CU_ASSERT(range->locked_ctx == &ctx1); 5005 5006 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5007 CU_ASSERT(rc == 0); 5008 stub_complete_io(1); 5009 spdk_delay_us(100); 5010 poll_threads(); 5011 5012 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5013 5014 /* Now try again, but with a write I/O. */ 5015 g_io_done = false; 5016 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 5017 CU_ASSERT(rc == 0); 5018 5019 g_lock_lba_range_done = false; 5020 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5021 CU_ASSERT(rc == 0); 5022 poll_threads(); 5023 5024 /* The lock should not be fully valid yet, since a write I/O is outstanding. 5025 * But note that the range should be on the channel's locked_list, to make sure no 5026 * new write I/O are started. 5027 */ 5028 CU_ASSERT(g_io_done == false); 5029 CU_ASSERT(g_lock_lba_range_done == false); 5030 range = TAILQ_FIRST(&channel->locked_ranges); 5031 SPDK_CU_ASSERT_FATAL(range != NULL); 5032 CU_ASSERT(range->offset == 20); 5033 CU_ASSERT(range->length == 10); 5034 5035 /* Complete the write I/O. This should make the lock valid (checked by confirming 5036 * our callback was invoked). 5037 */ 5038 stub_complete_io(1); 5039 spdk_delay_us(100); 5040 poll_threads(); 5041 CU_ASSERT(g_io_done == true); 5042 CU_ASSERT(g_lock_lba_range_done == true); 5043 5044 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5045 CU_ASSERT(rc == 0); 5046 poll_threads(); 5047 5048 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5049 5050 spdk_put_io_channel(io_ch); 5051 spdk_bdev_close(desc); 5052 free_bdev(bdev); 5053 ut_fini_bdev(); 5054 } 5055 5056 static void 5057 lock_lba_range_overlapped(void) 5058 { 5059 struct spdk_bdev *bdev; 5060 struct spdk_bdev_desc *desc = NULL; 5061 struct spdk_io_channel *io_ch; 5062 struct spdk_bdev_channel *channel; 5063 struct lba_range *range; 5064 int ctx1; 5065 int rc; 5066 5067 ut_init_bdev(NULL); 5068 bdev = allocate_bdev("bdev0"); 5069 5070 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5071 CU_ASSERT(rc == 0); 5072 CU_ASSERT(desc != NULL); 5073 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5074 io_ch = spdk_bdev_get_io_channel(desc); 5075 CU_ASSERT(io_ch != NULL); 5076 channel = spdk_io_channel_get_ctx(io_ch); 5077 5078 /* Lock range 20-29. */ 5079 g_lock_lba_range_done = false; 5080 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5081 CU_ASSERT(rc == 0); 5082 poll_threads(); 5083 5084 CU_ASSERT(g_lock_lba_range_done == true); 5085 range = TAILQ_FIRST(&channel->locked_ranges); 5086 SPDK_CU_ASSERT_FATAL(range != NULL); 5087 CU_ASSERT(range->offset == 20); 5088 CU_ASSERT(range->length == 10); 5089 5090 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 5091 * 20-29. 
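 * Instead, the new range is parked on bdev->internal.pending_locked_ranges
 * until the overlapping lock is released, which the assertions below verify.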
5092 */ 5093 g_lock_lba_range_done = false; 5094 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 5095 CU_ASSERT(rc == 0); 5096 poll_threads(); 5097 5098 CU_ASSERT(g_lock_lba_range_done == false); 5099 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5100 SPDK_CU_ASSERT_FATAL(range != NULL); 5101 CU_ASSERT(range->offset == 25); 5102 CU_ASSERT(range->length == 15); 5103 5104 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 5105 * no longer overlaps with an active lock. 5106 */ 5107 g_unlock_lba_range_done = false; 5108 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5109 CU_ASSERT(rc == 0); 5110 poll_threads(); 5111 5112 CU_ASSERT(g_unlock_lba_range_done == true); 5113 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5114 range = TAILQ_FIRST(&channel->locked_ranges); 5115 SPDK_CU_ASSERT_FATAL(range != NULL); 5116 CU_ASSERT(range->offset == 25); 5117 CU_ASSERT(range->length == 15); 5118 5119 /* Lock 40-59. This should immediately lock since it does not overlap with the 5120 * currently active 25-39 lock. 5121 */ 5122 g_lock_lba_range_done = false; 5123 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 5124 CU_ASSERT(rc == 0); 5125 poll_threads(); 5126 5127 CU_ASSERT(g_lock_lba_range_done == true); 5128 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5129 SPDK_CU_ASSERT_FATAL(range != NULL); 5130 range = TAILQ_NEXT(range, tailq); 5131 SPDK_CU_ASSERT_FATAL(range != NULL); 5132 CU_ASSERT(range->offset == 40); 5133 CU_ASSERT(range->length == 20); 5134 5135 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 5136 g_lock_lba_range_done = false; 5137 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 5138 CU_ASSERT(rc == 0); 5139 poll_threads(); 5140 5141 CU_ASSERT(g_lock_lba_range_done == false); 5142 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5143 SPDK_CU_ASSERT_FATAL(range != NULL); 5144 CU_ASSERT(range->offset == 35); 5145 CU_ASSERT(range->length == 10); 5146 5147 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 5148 * the 40-59 lock is still active. 5149 */ 5150 g_unlock_lba_range_done = false; 5151 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 5152 CU_ASSERT(rc == 0); 5153 poll_threads(); 5154 5155 CU_ASSERT(g_unlock_lba_range_done == true); 5156 CU_ASSERT(g_lock_lba_range_done == false); 5157 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5158 SPDK_CU_ASSERT_FATAL(range != NULL); 5159 CU_ASSERT(range->offset == 35); 5160 CU_ASSERT(range->length == 10); 5161 5162 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 5163 * no longer any active overlapping locks. 5164 */ 5165 g_unlock_lba_range_done = false; 5166 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 5167 CU_ASSERT(rc == 0); 5168 poll_threads(); 5169 5170 CU_ASSERT(g_unlock_lba_range_done == true); 5171 CU_ASSERT(g_lock_lba_range_done == true); 5172 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5173 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5174 SPDK_CU_ASSERT_FATAL(range != NULL); 5175 CU_ASSERT(range->offset == 35); 5176 CU_ASSERT(range->length == 10); 5177 5178 /* Finally, unlock 35-44. 
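 * After this, no locked or pending ranges should remain anywhere on the bdev.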
*/ 5179 g_unlock_lba_range_done = false; 5180 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 5181 CU_ASSERT(rc == 0); 5182 poll_threads(); 5183 5184 CU_ASSERT(g_unlock_lba_range_done == true); 5185 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 5186 5187 spdk_put_io_channel(io_ch); 5188 spdk_bdev_close(desc); 5189 free_bdev(bdev); 5190 ut_fini_bdev(); 5191 } 5192 5193 static void 5194 bdev_quiesce_done(void *ctx, int status) 5195 { 5196 g_lock_lba_range_done = true; 5197 } 5198 5199 static void 5200 bdev_unquiesce_done(void *ctx, int status) 5201 { 5202 g_unlock_lba_range_done = true; 5203 } 5204 5205 static void 5206 bdev_quiesce_done_unquiesce(void *ctx, int status) 5207 { 5208 struct spdk_bdev *bdev = ctx; 5209 int rc; 5210 5211 g_lock_lba_range_done = true; 5212 5213 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL); 5214 CU_ASSERT(rc == 0); 5215 } 5216 5217 static void 5218 bdev_quiesce(void) 5219 { 5220 struct spdk_bdev *bdev; 5221 struct spdk_bdev_desc *desc = NULL; 5222 struct spdk_io_channel *io_ch; 5223 struct spdk_bdev_channel *channel; 5224 struct lba_range *range; 5225 struct spdk_bdev_io *bdev_io; 5226 int ctx1; 5227 int rc; 5228 5229 ut_init_bdev(NULL); 5230 bdev = allocate_bdev("bdev0"); 5231 5232 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5233 CU_ASSERT(rc == 0); 5234 CU_ASSERT(desc != NULL); 5235 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5236 io_ch = spdk_bdev_get_io_channel(desc); 5237 CU_ASSERT(io_ch != NULL); 5238 channel = spdk_io_channel_get_ctx(io_ch); 5239 5240 g_lock_lba_range_done = false; 5241 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5242 CU_ASSERT(rc == 0); 5243 poll_threads(); 5244 5245 CU_ASSERT(g_lock_lba_range_done == true); 5246 range = TAILQ_FIRST(&channel->locked_ranges); 5247 SPDK_CU_ASSERT_FATAL(range != NULL); 5248 CU_ASSERT(range->offset == 0); 5249 CU_ASSERT(range->length == bdev->blockcnt); 5250 CU_ASSERT(range->owner_ch == NULL); 5251 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5252 SPDK_CU_ASSERT_FATAL(range != NULL); 5253 CU_ASSERT(range->offset == 0); 5254 CU_ASSERT(range->length == bdev->blockcnt); 5255 CU_ASSERT(range->owner_ch == NULL); 5256 5257 g_unlock_lba_range_done = false; 5258 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5259 CU_ASSERT(rc == 0); 5260 spdk_delay_us(100); 5261 poll_threads(); 5262 5263 CU_ASSERT(g_unlock_lba_range_done == true); 5264 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5265 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5266 5267 g_lock_lba_range_done = false; 5268 rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1); 5269 CU_ASSERT(rc == 0); 5270 poll_threads(); 5271 5272 CU_ASSERT(g_lock_lba_range_done == true); 5273 range = TAILQ_FIRST(&channel->locked_ranges); 5274 SPDK_CU_ASSERT_FATAL(range != NULL); 5275 CU_ASSERT(range->offset == 20); 5276 CU_ASSERT(range->length == 10); 5277 CU_ASSERT(range->owner_ch == NULL); 5278 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5279 SPDK_CU_ASSERT_FATAL(range != NULL); 5280 CU_ASSERT(range->offset == 20); 5281 CU_ASSERT(range->length == 10); 5282 CU_ASSERT(range->owner_ch == NULL); 5283 5284 /* Unlocks must exactly match a lock. 
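 * A partial unquiesce (same offset, shorter length) must be rejected with
 * -EINVAL, mirroring the bdev_unlock_lba_range() behavior checked earlier.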
*/ 5285 g_unlock_lba_range_done = false; 5286 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1); 5287 CU_ASSERT(rc == -EINVAL); 5288 CU_ASSERT(g_unlock_lba_range_done == false); 5289 5290 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1); 5291 CU_ASSERT(rc == 0); 5292 spdk_delay_us(100); 5293 poll_threads(); 5294 5295 CU_ASSERT(g_unlock_lba_range_done == true); 5296 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5297 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5298 5299 /* Test unquiesce from quiesce cb */ 5300 g_lock_lba_range_done = false; 5301 g_unlock_lba_range_done = false; 5302 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev); 5303 CU_ASSERT(rc == 0); 5304 poll_threads(); 5305 5306 CU_ASSERT(g_lock_lba_range_done == true); 5307 CU_ASSERT(g_unlock_lba_range_done == true); 5308 5309 /* Test quiesce with read I/O */ 5310 g_lock_lba_range_done = false; 5311 g_unlock_lba_range_done = false; 5312 g_io_done = false; 5313 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5314 CU_ASSERT(rc == 0); 5315 5316 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5317 CU_ASSERT(rc == 0); 5318 poll_threads(); 5319 5320 CU_ASSERT(g_io_done == false); 5321 CU_ASSERT(g_lock_lba_range_done == false); 5322 range = TAILQ_FIRST(&channel->locked_ranges); 5323 SPDK_CU_ASSERT_FATAL(range != NULL); 5324 5325 stub_complete_io(1); 5326 spdk_delay_us(100); 5327 poll_threads(); 5328 CU_ASSERT(g_io_done == true); 5329 CU_ASSERT(g_lock_lba_range_done == true); 5330 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5331 5332 g_io_done = false; 5333 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5334 CU_ASSERT(rc == 0); 5335 5336 bdev_io = TAILQ_FIRST(&channel->io_locked); 5337 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 5338 CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20); 5339 CU_ASSERT(bdev_io->u.bdev.num_blocks == 1); 5340 5341 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5342 CU_ASSERT(rc == 0); 5343 spdk_delay_us(100); 5344 poll_threads(); 5345 5346 CU_ASSERT(g_unlock_lba_range_done == true); 5347 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5348 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5349 5350 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5351 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); 5352 poll_threads(); 5353 CU_ASSERT(g_io_done == true); 5354 5355 spdk_put_io_channel(io_ch); 5356 spdk_bdev_close(desc); 5357 free_bdev(bdev); 5358 ut_fini_bdev(); 5359 } 5360 5361 static void 5362 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 5363 { 5364 g_abort_done = true; 5365 g_abort_status = bdev_io->internal.status; 5366 spdk_bdev_free_io(bdev_io); 5367 } 5368 5369 static void 5370 bdev_io_abort(void) 5371 { 5372 struct spdk_bdev *bdev; 5373 struct spdk_bdev_desc *desc = NULL; 5374 struct spdk_io_channel *io_ch; 5375 struct spdk_bdev_channel *channel; 5376 struct spdk_bdev_mgmt_channel *mgmt_ch; 5377 struct spdk_bdev_opts bdev_opts = {}; 5378 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 5379 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5380 int rc; 5381 5382 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5383 bdev_opts.bdev_io_pool_size = 7; 5384 bdev_opts.bdev_io_cache_size = 2; 5385 ut_init_bdev(&bdev_opts); 5386 5387 bdev = allocate_bdev("bdev0"); 5388 5389 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5390 
CU_ASSERT(rc == 0); 5391 CU_ASSERT(desc != NULL); 5392 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5393 io_ch = spdk_bdev_get_io_channel(desc); 5394 CU_ASSERT(io_ch != NULL); 5395 channel = spdk_io_channel_get_ctx(io_ch); 5396 mgmt_ch = channel->shared_resource->mgmt_ch; 5397 5398 g_abort_done = false; 5399 5400 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5401 5402 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5403 CU_ASSERT(rc == -ENOTSUP); 5404 5405 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5406 5407 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 5408 CU_ASSERT(rc == 0); 5409 CU_ASSERT(g_abort_done == true); 5410 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 5411 5412 /* Test the case that the target I/O was successfully aborted. */ 5413 g_io_done = false; 5414 5415 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5416 CU_ASSERT(rc == 0); 5417 CU_ASSERT(g_io_done == false); 5418 5419 g_abort_done = false; 5420 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5421 5422 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5423 CU_ASSERT(rc == 0); 5424 CU_ASSERT(g_io_done == true); 5425 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5426 stub_complete_io(1); 5427 CU_ASSERT(g_abort_done == true); 5428 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5429 5430 /* Test the case that the target I/O was not aborted because it completed 5431 * in the middle of execution of the abort. 5432 */ 5433 g_io_done = false; 5434 5435 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5436 CU_ASSERT(rc == 0); 5437 CU_ASSERT(g_io_done == false); 5438 5439 g_abort_done = false; 5440 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5441 5442 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5443 CU_ASSERT(rc == 0); 5444 CU_ASSERT(g_io_done == false); 5445 5446 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5447 stub_complete_io(1); 5448 CU_ASSERT(g_io_done == true); 5449 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5450 5451 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5452 stub_complete_io(1); 5453 CU_ASSERT(g_abort_done == true); 5454 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5455 5456 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5457 5458 bdev->optimal_io_boundary = 16; 5459 bdev->split_on_optimal_io_boundary = true; 5460 5461 /* Test that a single-vector command which is split is aborted correctly. 5462 * Offset 14, length 8, payload 0xF000 5463 * Child - Offset 14, length 2, payload 0xF000 5464 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5465 */ 5466 g_io_done = false; 5467 5468 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5469 CU_ASSERT(rc == 0); 5470 CU_ASSERT(g_io_done == false); 5471 5472 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5473 5474 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5475 5476 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5477 CU_ASSERT(rc == 0); 5478 CU_ASSERT(g_io_done == true); 5479 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5480 stub_complete_io(2); 5481 CU_ASSERT(g_abort_done == true); 5482 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5483 5484 /* Test that a multi-vector command that needs to be split by strip and then 5485 * needs to be split is aborted correctly. Abort is requested before the second 5486 * child I/O was submitted. 
The parent I/O should complete with failure without 5487 * submitting the second child I/O. 5488 */ 5489 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5490 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5491 iov[i].iov_len = 512; 5492 } 5493 5494 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 5495 g_io_done = false; 5496 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 5497 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5498 CU_ASSERT(rc == 0); 5499 CU_ASSERT(g_io_done == false); 5500 5501 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5502 5503 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5504 5505 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5506 CU_ASSERT(rc == 0); 5507 CU_ASSERT(g_io_done == true); 5508 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5509 stub_complete_io(1); 5510 CU_ASSERT(g_abort_done == true); 5511 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5512 5513 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5514 5515 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5516 5517 bdev->optimal_io_boundary = 16; 5518 g_io_done = false; 5519 5520 /* Test that a single-vector command which is split is aborted correctly. 5521 * Unlike the cases above, the child abort requests will be submitted 5522 * sequentially, due to the limited capacity of the spdk_bdev_io pool. 5523 */ 5524 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5525 CU_ASSERT(rc == 0); 5526 CU_ASSERT(g_io_done == false); 5527 5528 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5529 5530 g_abort_done = false; 5531 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5532 5533 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5534 CU_ASSERT(rc == 0); 5535 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5536 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5537 5538 stub_complete_io(1); 5539 CU_ASSERT(g_io_done == true); 5540 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5541 stub_complete_io(3); 5542 CU_ASSERT(g_abort_done == true); 5543 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5544 5545 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5546 5547 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5548 5549 bdev->split_on_optimal_io_boundary = false; 5550 bdev->split_on_write_unit = true; 5551 bdev->write_unit_size = 16; 5552 5553 /* Test that a single-vector command which is split is aborted correctly. 5554 * Offset 16, length 32, payload 0xF000 5555 * Child - Offset 16, length 16, payload 0xF000 5556 * Child - Offset 32, length 16, payload 0xF000 + 16 * 512 5557 * 5558 * Use bdev->split_on_write_unit as a split condition.
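 * (With split_on_write_unit set, each child write covers whole multiples of
 * write_unit_size blocks; 32 blocks with write_unit_size == 16 yields exactly
 * the two children listed above.)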
5559 */ 5560 g_io_done = false; 5561 5562 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 16, 32, io_done, &io_ctx1); 5563 CU_ASSERT(rc == 0); 5564 CU_ASSERT(g_io_done == false); 5565 5566 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5567 5568 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5569 5570 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5571 CU_ASSERT(rc == 0); 5572 CU_ASSERT(g_io_done == true); 5573 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5574 stub_complete_io(2); 5575 CU_ASSERT(g_abort_done == true); 5576 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5577 5578 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5579 5580 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5581 5582 bdev->split_on_write_unit = false; 5583 bdev->max_rw_size = 16; 5584 5585 /* Test that a single-vector command which is split is aborted correctly. 5586 * Use bdev->max_rw_size as a split condition. 5587 */ 5588 g_io_done = false; 5589 5590 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1); 5591 CU_ASSERT(rc == 0); 5592 CU_ASSERT(g_io_done == false); 5593 5594 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5595 5596 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5597 5598 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5599 CU_ASSERT(rc == 0); 5600 CU_ASSERT(g_io_done == true); 5601 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5602 stub_complete_io(2); 5603 CU_ASSERT(g_abort_done == true); 5604 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5605 5606 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5607 5608 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5609 5610 bdev->max_rw_size = 0; 5611 bdev->max_segment_size = 512 * 16; 5612 bdev->max_num_segments = 1; 5613 5614 /* Test that a single-vector command which is split is aborted correctly. 5615 * Use bdev->max_segment_size and bdev->max_num_segments together as split conditions. 5616 * 5617 * One single-vector command is changed to one two-vectors command, but 5618 * bdev->max_num_segments is 1 and it is split into two single-vector commands. 
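 * (Arithmetic: 32 blocks * 512 B = 16 KiB of payload; max_segment_size caps
 * each segment at 512 * 16 = 8 KiB, so the payload becomes two segments, and
 * max_num_segments == 1 then forces one child I/O per segment.)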
5619 */ 5620 g_io_done = false; 5621 5622 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1); 5623 CU_ASSERT(rc == 0); 5624 CU_ASSERT(g_io_done == false); 5625 5626 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5627 5628 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5629 5630 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5631 CU_ASSERT(rc == 0); 5632 CU_ASSERT(g_io_done == true); 5633 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5634 stub_complete_io(2); 5635 CU_ASSERT(g_abort_done == true); 5636 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5637 5638 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5639 5640 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5641 5642 spdk_put_io_channel(io_ch); 5643 spdk_bdev_close(desc); 5644 free_bdev(bdev); 5645 ut_fini_bdev(); 5646 } 5647 5648 static void 5649 bdev_unmap(void) 5650 { 5651 struct spdk_bdev *bdev; 5652 struct spdk_bdev_desc *desc = NULL; 5653 struct spdk_io_channel *ioch; 5654 struct spdk_bdev_channel *bdev_ch; 5655 struct ut_expected_io *expected_io; 5656 struct spdk_bdev_opts bdev_opts = {}; 5657 uint32_t i, num_outstanding; 5658 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5659 int rc; 5660 5661 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5662 bdev_opts.bdev_io_pool_size = 512; 5663 bdev_opts.bdev_io_cache_size = 64; 5664 ut_init_bdev(&bdev_opts); 5665 5666 bdev = allocate_bdev("bdev"); 5667 5668 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5669 CU_ASSERT_EQUAL(rc, 0); 5670 SPDK_CU_ASSERT_FATAL(desc != NULL); 5671 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5672 ioch = spdk_bdev_get_io_channel(desc); 5673 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5674 bdev_ch = spdk_io_channel_get_ctx(ioch); 5675 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5676 5677 fn_table.submit_request = stub_submit_request; 5678 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5679 5680 /* Case 1: First test the request won't be split */ 5681 num_blocks = 32; 5682 5683 g_io_done = false; 5684 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5685 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5686 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5687 CU_ASSERT_EQUAL(rc, 0); 5688 CU_ASSERT(g_io_done == false); 5689 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5690 stub_complete_io(1); 5691 CU_ASSERT(g_io_done == true); 5692 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5693 5694 /* Case 2: Test the split with 2 children requests */ 5695 bdev->max_unmap = 8; 5696 bdev->max_unmap_segments = 2; 5697 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5698 num_blocks = max_unmap_blocks * 2; 5699 offset = 0; 5700 5701 g_io_done = false; 5702 for (i = 0; i < 2; i++) { 5703 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5704 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5705 offset += max_unmap_blocks; 5706 } 5707 5708 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5709 CU_ASSERT_EQUAL(rc, 0); 5710 CU_ASSERT(g_io_done == false); 5711 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5712 stub_complete_io(2); 5713 CU_ASSERT(g_io_done == true); 5714 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5715 5716 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5717 num_children = 15; 5718 
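	/* Child unmaps are issued in batches of at most
	 * SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS (8 at the time of
	 * writing), so the 15 children below are expected to complete as a
	 * batch of 8 followed by a batch of 7, which the while loop mirrors.
	 */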
num_blocks = max_unmap_blocks * num_children; 5719 g_io_done = false; 5720 offset = 0; 5721 for (i = 0; i < num_children; i++) { 5722 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5723 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5724 offset += max_unmap_blocks; 5725 } 5726 5727 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5728 CU_ASSERT_EQUAL(rc, 0); 5729 CU_ASSERT(g_io_done == false); 5730 5731 while (num_children > 0) { 5732 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5733 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5734 stub_complete_io(num_outstanding); 5735 num_children -= num_outstanding; 5736 } 5737 CU_ASSERT(g_io_done == true); 5738 5739 spdk_put_io_channel(ioch); 5740 spdk_bdev_close(desc); 5741 free_bdev(bdev); 5742 ut_fini_bdev(); 5743 } 5744 5745 static void 5746 bdev_write_zeroes_split_test(void) 5747 { 5748 struct spdk_bdev *bdev; 5749 struct spdk_bdev_desc *desc = NULL; 5750 struct spdk_io_channel *ioch; 5751 struct spdk_bdev_channel *bdev_ch; 5752 struct ut_expected_io *expected_io; 5753 struct spdk_bdev_opts bdev_opts = {}; 5754 uint32_t i, num_outstanding; 5755 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5756 int rc; 5757 5758 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5759 bdev_opts.bdev_io_pool_size = 512; 5760 bdev_opts.bdev_io_cache_size = 64; 5761 ut_init_bdev(&bdev_opts); 5762 5763 bdev = allocate_bdev("bdev"); 5764 5765 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5766 CU_ASSERT_EQUAL(rc, 0); 5767 SPDK_CU_ASSERT_FATAL(desc != NULL); 5768 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5769 ioch = spdk_bdev_get_io_channel(desc); 5770 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5771 bdev_ch = spdk_io_channel_get_ctx(ioch); 5772 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5773 5774 fn_table.submit_request = stub_submit_request; 5775 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5776 5777 /* Case 1: First test the request won't be split */ 5778 num_blocks = 32; 5779 5780 g_io_done = false; 5781 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5782 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5783 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5784 CU_ASSERT_EQUAL(rc, 0); 5785 CU_ASSERT(g_io_done == false); 5786 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5787 stub_complete_io(1); 5788 CU_ASSERT(g_io_done == true); 5789 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5790 5791 /* Case 2: Test the split with 2 children requests */ 5792 max_write_zeroes_blocks = 8; 5793 bdev->max_write_zeroes = max_write_zeroes_blocks; 5794 num_blocks = max_write_zeroes_blocks * 2; 5795 offset = 0; 5796 5797 g_io_done = false; 5798 for (i = 0; i < 2; i++) { 5799 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5800 0); 5801 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5802 offset += max_write_zeroes_blocks; 5803 } 5804 5805 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5806 CU_ASSERT_EQUAL(rc, 0); 5807 CU_ASSERT(g_io_done == false); 5808 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5809 stub_complete_io(2); 5810 CU_ASSERT(g_io_done == true); 5811 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5812 5813 /* Case 3: Test the 
split with 15 children requests, will finish 8 requests first */ 5814 num_children = 15; 5815 num_blocks = max_write_zeroes_blocks * num_children; 5816 g_io_done = false; 5817 offset = 0; 5818 for (i = 0; i < num_children; i++) { 5819 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5820 0); 5821 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5822 offset += max_write_zeroes_blocks; 5823 } 5824 5825 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5826 CU_ASSERT_EQUAL(rc, 0); 5827 CU_ASSERT(g_io_done == false); 5828 5829 while (num_children > 0) { 5830 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5831 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5832 stub_complete_io(num_outstanding); 5833 num_children -= num_outstanding; 5834 } 5835 CU_ASSERT(g_io_done == true); 5836 5837 spdk_put_io_channel(ioch); 5838 spdk_bdev_close(desc); 5839 free_bdev(bdev); 5840 ut_fini_bdev(); 5841 } 5842 5843 static void 5844 bdev_set_options_test(void) 5845 { 5846 struct spdk_bdev_opts bdev_opts = {}; 5847 int rc; 5848 5849 /* Case1: Do not set opts_size */ 5850 rc = spdk_bdev_set_opts(&bdev_opts); 5851 CU_ASSERT(rc == -1); 5852 } 5853 5854 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5855 5856 static int 5857 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5858 int array_size) 5859 { 5860 if (array_size > 0 && domains) { 5861 domains[0] = g_bdev_memory_domain; 5862 } 5863 5864 return 1; 5865 } 5866 5867 static void 5868 bdev_get_memory_domains(void) 5869 { 5870 struct spdk_bdev_fn_table fn_table = { 5871 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5872 }; 5873 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5874 struct spdk_memory_domain *domains[2] = {}; 5875 int rc; 5876 5877 /* bdev is NULL */ 5878 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5879 CU_ASSERT(rc == -EINVAL); 5880 5881 /* domains is NULL */ 5882 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5883 CU_ASSERT(rc == 1); 5884 5885 /* array size is 0 */ 5886 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5887 CU_ASSERT(rc == 1); 5888 5889 /* get_supported_dma_device_types op is set */ 5890 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5891 CU_ASSERT(rc == 1); 5892 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5893 5894 /* get_supported_dma_device_types op is not set */ 5895 fn_table.get_memory_domains = NULL; 5896 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5897 CU_ASSERT(rc == 0); 5898 } 5899 5900 static void 5901 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5902 { 5903 struct spdk_bdev *bdev; 5904 struct spdk_bdev_desc *desc = NULL; 5905 struct spdk_io_channel *io_ch; 5906 char io_buf[512]; 5907 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5908 struct ut_expected_io *expected_io; 5909 int rc; 5910 5911 ut_init_bdev(NULL); 5912 5913 bdev = allocate_bdev("bdev0"); 5914 bdev->md_interleave = false; 5915 bdev->md_len = 8; 5916 5917 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5918 CU_ASSERT(rc == 0); 5919 SPDK_CU_ASSERT_FATAL(desc != NULL); 5920 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5921 io_ch = spdk_bdev_get_io_channel(desc); 5922 CU_ASSERT(io_ch != NULL); 5923 5924 /* read */ 5925 g_io_done = false; 5926 expected_io = 
ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5927 if (ext_io_opts) { 5928 expected_io->md_buf = ext_io_opts->metadata; 5929 } 5930 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5931 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5932 5933 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5934 5935 CU_ASSERT(rc == 0); 5936 CU_ASSERT(g_io_done == false); 5937 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5938 stub_complete_io(1); 5939 CU_ASSERT(g_io_done == true); 5940 5941 /* write */ 5942 g_io_done = false; 5943 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5944 if (ext_io_opts) { 5945 expected_io->md_buf = ext_io_opts->metadata; 5946 } 5947 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5948 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5949 5950 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5951 5952 CU_ASSERT(rc == 0); 5953 CU_ASSERT(g_io_done == false); 5954 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5955 stub_complete_io(1); 5956 CU_ASSERT(g_io_done == true); 5957 5958 spdk_put_io_channel(io_ch); 5959 spdk_bdev_close(desc); 5960 free_bdev(bdev); 5961 ut_fini_bdev(); 5962 5963 } 5964 5965 static void 5966 bdev_io_ext(void) 5967 { 5968 struct spdk_bdev_ext_io_opts ext_io_opts = { 5969 .metadata = (void *)0xFF000000, 5970 .size = sizeof(ext_io_opts), 5971 .dif_check_flags_exclude_mask = 0 5972 }; 5973 5974 _bdev_io_ext(&ext_io_opts); 5975 } 5976 5977 static void 5978 bdev_io_ext_no_opts(void) 5979 { 5980 _bdev_io_ext(NULL); 5981 } 5982 5983 static void 5984 bdev_io_ext_invalid_opts(void) 5985 { 5986 struct spdk_bdev *bdev; 5987 struct spdk_bdev_desc *desc = NULL; 5988 struct spdk_io_channel *io_ch; 5989 char io_buf[512]; 5990 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5991 struct spdk_bdev_ext_io_opts ext_io_opts = { 5992 .metadata = (void *)0xFF000000, 5993 .size = sizeof(ext_io_opts), 5994 .dif_check_flags_exclude_mask = 0 5995 }; 5996 int rc; 5997 5998 ut_init_bdev(NULL); 5999 6000 bdev = allocate_bdev("bdev0"); 6001 bdev->md_interleave = false; 6002 bdev->md_len = 8; 6003 6004 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6005 CU_ASSERT(rc == 0); 6006 SPDK_CU_ASSERT_FATAL(desc != NULL); 6007 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6008 io_ch = spdk_bdev_get_io_channel(desc); 6009 CU_ASSERT(io_ch != NULL); 6010 6011 /* Test invalid ext_opts size */ 6012 ext_io_opts.size = 0; 6013 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6014 CU_ASSERT(rc == -EINVAL); 6015 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6016 CU_ASSERT(rc == -EINVAL); 6017 6018 ext_io_opts.size = sizeof(ext_io_opts) * 2; 6019 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6020 CU_ASSERT(rc == -EINVAL); 6021 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6022 CU_ASSERT(rc == -EINVAL); 6023 6024 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 6025 sizeof(ext_io_opts.metadata) - 1; 6026 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6027 CU_ASSERT(rc == -EINVAL); 6028 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6029 CU_ASSERT(rc == 
-EINVAL); 6030 6031 spdk_put_io_channel(io_ch); 6032 spdk_bdev_close(desc); 6033 free_bdev(bdev); 6034 ut_fini_bdev(); 6035 } 6036 6037 static void 6038 bdev_io_ext_split(void) 6039 { 6040 struct spdk_bdev *bdev; 6041 struct spdk_bdev_desc *desc = NULL; 6042 struct spdk_io_channel *io_ch; 6043 char io_buf[512]; 6044 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 6045 struct ut_expected_io *expected_io; 6046 struct spdk_bdev_ext_io_opts ext_io_opts = { 6047 .metadata = (void *)0xFF000000, 6048 .size = sizeof(ext_io_opts), 6049 .dif_check_flags_exclude_mask = 0 6050 }; 6051 int rc; 6052 6053 ut_init_bdev(NULL); 6054 6055 bdev = allocate_bdev("bdev0"); 6056 bdev->md_interleave = false; 6057 bdev->md_len = 8; 6058 6059 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6060 CU_ASSERT(rc == 0); 6061 SPDK_CU_ASSERT_FATAL(desc != NULL); 6062 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6063 io_ch = spdk_bdev_get_io_channel(desc); 6064 CU_ASSERT(io_ch != NULL); 6065 6066 /* Check that IO request with ext_opts and metadata is split correctly 6067 * Offset 14, length 8, payload 0xF000 6068 * Child - Offset 14, length 2, payload 0xF000 6069 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 6070 */ 6071 bdev->optimal_io_boundary = 16; 6072 bdev->split_on_optimal_io_boundary = true; 6073 bdev->md_interleave = false; 6074 bdev->md_len = 8; 6075 6076 iov.iov_base = (void *)0xF000; 6077 iov.iov_len = 4096; 6078 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 6079 ext_io_opts.metadata = (void *)0xFF000000; 6080 ext_io_opts.size = sizeof(ext_io_opts); 6081 g_io_done = false; 6082 6083 /* read */ 6084 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 6085 expected_io->md_buf = ext_io_opts.metadata; 6086 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 6087 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6088 6089 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 6090 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 6091 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 6092 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6093 6094 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 6095 CU_ASSERT(rc == 0); 6096 CU_ASSERT(g_io_done == false); 6097 6098 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6099 stub_complete_io(2); 6100 CU_ASSERT(g_io_done == true); 6101 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6102 6103 /* write */ 6104 g_io_done = false; 6105 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 6106 expected_io->md_buf = ext_io_opts.metadata; 6107 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 6108 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6109 6110 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 6111 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 6112 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 6113 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6114 6115 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 6116 CU_ASSERT(rc == 0); 6117 CU_ASSERT(g_io_done == false); 6118 6119 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6120 stub_complete_io(2); 6121 CU_ASSERT(g_io_done == true); 6122 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6123 6124 
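	/* Note how the metadata buffer follows the data split above: the second
	 * child starts 2 blocks into the request, so its md_buf is offset by
	 * 2 * md_len (2 * 8 bytes) from ext_io_opts.metadata.
	 */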
spdk_put_io_channel(io_ch); 6125 spdk_bdev_close(desc); 6126 free_bdev(bdev); 6127 ut_fini_bdev(); 6128 } 6129 6130 static void 6131 bdev_io_ext_bounce_buffer(void) 6132 { 6133 struct spdk_bdev *bdev; 6134 struct spdk_bdev_desc *desc = NULL; 6135 struct spdk_io_channel *io_ch; 6136 char io_buf[512]; 6137 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 6138 struct ut_expected_io *expected_io, *aux_io; 6139 struct spdk_bdev_ext_io_opts ext_io_opts = { 6140 .metadata = (void *)0xFF000000, 6141 .size = sizeof(ext_io_opts), 6142 .dif_check_flags_exclude_mask = 0 6143 }; 6144 int rc; 6145 6146 ut_init_bdev(NULL); 6147 6148 bdev = allocate_bdev("bdev0"); 6149 bdev->md_interleave = false; 6150 bdev->md_len = 8; 6151 6152 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6153 CU_ASSERT(rc == 0); 6154 SPDK_CU_ASSERT_FATAL(desc != NULL); 6155 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6156 io_ch = spdk_bdev_get_io_channel(desc); 6157 CU_ASSERT(io_ch != NULL); 6158 6159 /* Verify data pull/push 6160 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */ 6161 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 6162 6163 /* read */ 6164 g_io_done = false; 6165 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6166 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6167 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6168 6169 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6170 6171 CU_ASSERT(rc == 0); 6172 CU_ASSERT(g_io_done == false); 6173 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6174 stub_complete_io(1); 6175 CU_ASSERT(g_memory_domain_push_data_called == true); 6176 CU_ASSERT(g_io_done == true); 6177 6178 /* write */ 6179 g_io_done = false; 6180 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6181 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6182 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6183 6184 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6185 6186 CU_ASSERT(rc == 0); 6187 CU_ASSERT(g_memory_domain_pull_data_called == true); 6188 CU_ASSERT(g_io_done == false); 6189 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6190 stub_complete_io(1); 6191 CU_ASSERT(g_io_done == true); 6192 6193 /* Verify the request is queued after receiving ENOMEM from pull */ 6194 g_io_done = false; 6195 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6196 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6197 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6198 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6199 CU_ASSERT(rc == 0); 6200 CU_ASSERT(g_io_done == false); 6201 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6202 6203 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6204 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6205 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6206 6207 MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM); 6208 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6209 CU_ASSERT(rc == 0); 6210 CU_ASSERT(g_io_done == false); 6211 /* The second IO has been queued */ 6212 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6213 6214 
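	/* A -ENOMEM returned by the memory-domain pull does not fail the I/O;
	 * the request is queued internally and retried once an outstanding I/O
	 * on the channel completes, which is what allows the queued write to
	 * proceed below.
	 */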
MOCK_CLEAR(spdk_memory_domain_pull_data); 6215 g_memory_domain_pull_data_called = false; 6216 stub_complete_io(1); 6217 CU_ASSERT(g_io_done == true); 6218 CU_ASSERT(g_memory_domain_pull_data_called == true); 6219 /* The second IO should be submitted now */ 6220 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6221 g_io_done = false; 6222 stub_complete_io(1); 6223 CU_ASSERT(g_io_done == true); 6224 6225 /* Verify the request is queued after receiving ENOMEM from push */ 6226 g_io_done = false; 6227 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6228 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6229 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6230 6231 MOCK_SET(spdk_memory_domain_push_data, -ENOMEM); 6232 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6233 CU_ASSERT(rc == 0); 6234 CU_ASSERT(g_io_done == false); 6235 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6236 6237 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6238 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6239 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6240 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6241 CU_ASSERT(rc == 0); 6242 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6243 6244 stub_complete_io(1); 6245 /* The IO isn't done yet, it's still waiting on push */ 6246 CU_ASSERT(g_io_done == false); 6247 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6248 MOCK_CLEAR(spdk_memory_domain_push_data); 6249 g_memory_domain_push_data_called = false; 6250 /* Completing the second IO should also trigger push on the first one */ 6251 stub_complete_io(1); 6252 CU_ASSERT(g_io_done == true); 6253 CU_ASSERT(g_memory_domain_push_data_called == true); 6254 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6255 6256 spdk_put_io_channel(io_ch); 6257 spdk_bdev_close(desc); 6258 free_bdev(bdev); 6259 ut_fini_bdev(); 6260 } 6261 6262 static void 6263 bdev_register_uuid_alias(void) 6264 { 6265 struct spdk_bdev *bdev, *second; 6266 char uuid[SPDK_UUID_STRING_LEN]; 6267 int rc; 6268 6269 ut_init_bdev(NULL); 6270 bdev = allocate_bdev("bdev0"); 6271 6272 /* Make sure a UUID was generated */ 6273 CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid)); 6274 6275 /* Check that a UUID alias was registered */ 6276 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 6277 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6278 6279 /* Unregister the bdev */ 6280 spdk_bdev_unregister(bdev, NULL, NULL); 6281 poll_threads(); 6282 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6283 6284 /* Check the same, but this time register the bdev with a non-zero UUID */ 6285 rc = spdk_bdev_register(bdev); 6286 CU_ASSERT_EQUAL(rc, 0); 6287 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6288 6289 /* Unregister the bdev */ 6290 spdk_bdev_unregister(bdev, NULL, NULL); 6291 poll_threads(); 6292 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6293 6294 /* Register the bdev using UUID as the name */ 6295 bdev->name = uuid; 6296 rc = spdk_bdev_register(bdev); 6297 CU_ASSERT_EQUAL(rc, 0); 6298 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6299 6300 /* Unregister the bdev */ 6301 spdk_bdev_unregister(bdev, NULL, NULL); 6302 poll_threads(); 6303 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6304 6305 /* Check that it's not possible to register two bdevs with the same UUID */ 6306 bdev->name = "bdev0"; 6307 second =
allocate_bdev("bdev1"); 6308 spdk_uuid_copy(&bdev->uuid, &second->uuid); 6309 rc = spdk_bdev_register(bdev); 6310 CU_ASSERT_EQUAL(rc, -EEXIST); 6311 6312 /* Regenerate the UUID and re-check */ 6313 spdk_uuid_generate(&bdev->uuid); 6314 rc = spdk_bdev_register(bdev); 6315 CU_ASSERT_EQUAL(rc, 0); 6316 6317 /* And check that both bdevs can be retrieved through their UUIDs */ 6318 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 6319 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6320 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 6321 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 6322 6323 free_bdev(second); 6324 free_bdev(bdev); 6325 ut_fini_bdev(); 6326 } 6327 6328 static void 6329 bdev_unregister_by_name(void) 6330 { 6331 struct spdk_bdev *bdev; 6332 int rc; 6333 6334 bdev = allocate_bdev("bdev"); 6335 6336 g_event_type1 = 0xFF; 6337 g_unregister_arg = NULL; 6338 g_unregister_rc = -1; 6339 6340 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6341 CU_ASSERT(rc == -ENODEV); 6342 6343 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6344 CU_ASSERT(rc == -ENODEV); 6345 6346 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6347 CU_ASSERT(rc == 0); 6348 6349 /* Check that unregister callback is delayed */ 6350 CU_ASSERT(g_unregister_arg == NULL); 6351 CU_ASSERT(g_unregister_rc == -1); 6352 6353 poll_threads(); 6354 6355 /* Event callback shall not be issued because device was closed */ 6356 CU_ASSERT(g_event_type1 == 0xFF); 6357 /* Unregister callback is issued */ 6358 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 6359 CU_ASSERT(g_unregister_rc == 0); 6360 6361 free_bdev(bdev); 6362 } 6363 6364 static int 6365 count_bdevs(void *ctx, struct spdk_bdev *bdev) 6366 { 6367 int *count = ctx; 6368 6369 (*count)++; 6370 6371 return 0; 6372 } 6373 6374 static void 6375 for_each_bdev_test(void) 6376 { 6377 struct spdk_bdev *bdev[8]; 6378 int rc, count; 6379 6380 bdev[0] = allocate_bdev("bdev0"); 6381 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 6382 6383 bdev[1] = allocate_bdev("bdev1"); 6384 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 6385 CU_ASSERT(rc == 0); 6386 6387 bdev[2] = allocate_bdev("bdev2"); 6388 6389 bdev[3] = allocate_bdev("bdev3"); 6390 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 6391 CU_ASSERT(rc == 0); 6392 6393 bdev[4] = allocate_bdev("bdev4"); 6394 6395 bdev[5] = allocate_bdev("bdev5"); 6396 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 6397 CU_ASSERT(rc == 0); 6398 6399 bdev[6] = allocate_bdev("bdev6"); 6400 6401 bdev[7] = allocate_bdev("bdev7"); 6402 6403 count = 0; 6404 rc = spdk_for_each_bdev(&count, count_bdevs); 6405 CU_ASSERT(rc == 0); 6406 CU_ASSERT(count == 7); 6407 6408 count = 0; 6409 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 6410 CU_ASSERT(rc == 0); 6411 CU_ASSERT(count == 4); 6412 6413 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 6414 free_bdev(bdev[0]); 6415 free_bdev(bdev[1]); 6416 free_bdev(bdev[2]); 6417 free_bdev(bdev[3]); 6418 free_bdev(bdev[4]); 6419 free_bdev(bdev[5]); 6420 free_bdev(bdev[6]); 6421 free_bdev(bdev[7]); 6422 } 6423 6424 static void 6425 bdev_seek_test(void) 6426 { 6427 struct spdk_bdev *bdev; 6428 struct spdk_bdev_desc *desc = NULL; 6429 struct spdk_io_channel *io_ch; 6430 int rc; 6431 6432 ut_init_bdev(NULL); 6433 poll_threads(); 6434 6435 bdev = allocate_bdev("bdev0"); 6436 6437 rc = 
spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6438 CU_ASSERT(rc == 0); 6439 poll_threads(); 6440 SPDK_CU_ASSERT_FATAL(desc != NULL); 6441 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6442 io_ch = spdk_bdev_get_io_channel(desc); 6443 CU_ASSERT(io_ch != NULL); 6444 6445 /* Seek data not supported */ 6446 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6447 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6448 CU_ASSERT(rc == 0); 6449 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6450 poll_threads(); 6451 CU_ASSERT(g_seek_offset == 0); 6452 6453 /* Seek hole not supported */ 6454 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6455 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6456 CU_ASSERT(rc == 0); 6457 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6458 poll_threads(); 6459 CU_ASSERT(g_seek_offset == UINT64_MAX); 6460 6461 /* Seek data supported */ 6462 g_seek_data_offset = 12345; 6463 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6464 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6465 CU_ASSERT(rc == 0); 6466 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6467 stub_complete_io(1); 6468 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6469 CU_ASSERT(g_seek_offset == 12345); 6470 6471 /* Seek hole supported */ 6472 g_seek_hole_offset = 67890; 6473 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6474 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6475 CU_ASSERT(rc == 0); 6476 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6477 stub_complete_io(1); 6478 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6479 CU_ASSERT(g_seek_offset == 67890); 6480 6481 spdk_put_io_channel(io_ch); 6482 spdk_bdev_close(desc); 6483 free_bdev(bdev); 6484 ut_fini_bdev(); 6485 } 6486 6487 static void 6488 bdev_copy(void) 6489 { 6490 struct spdk_bdev *bdev; 6491 struct spdk_bdev_desc *desc = NULL; 6492 struct spdk_io_channel *ioch; 6493 struct ut_expected_io *expected_io; 6494 uint64_t src_offset, num_blocks; 6495 uint32_t num_completed; 6496 int rc; 6497 6498 ut_init_bdev(NULL); 6499 bdev = allocate_bdev("bdev"); 6500 6501 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6502 CU_ASSERT_EQUAL(rc, 0); 6503 SPDK_CU_ASSERT_FATAL(desc != NULL); 6504 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6505 ioch = spdk_bdev_get_io_channel(desc); 6506 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6507 6508 fn_table.submit_request = stub_submit_request; 6509 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6510 6511 /* First test that if the bdev supports copy, the request won't be split */ 6512 bdev->md_len = 0; 6513 bdev->blocklen = 512; 6514 num_blocks = 128; 6515 src_offset = bdev->blockcnt - num_blocks; 6516 6517 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6518 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6519 6520 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6521 CU_ASSERT_EQUAL(rc, 0); 6522 num_completed = stub_complete_io(1); 6523 CU_ASSERT_EQUAL(num_completed, 1); 6524 6525 /* Check that if copy is not supported it'll still work */ 6526 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0); 6527 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6528 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0); 6529 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, 
static void
bdev_copy_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request won't be split */
	num_blocks = 32;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 child requests */
	max_copy_blocks = 8;
	bdev->max_copy = max_copy_blocks;
	num_children = 2;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children);
	stub_complete_io(num_children);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 child requests; the first 8 complete first */
	num_children = 15;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	/*
	 * Case 4: Same test scenario as case 2, but the configuration is
	 * different: copy is not supported.
	 */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	num_children = 2;
	max_copy_blocks = spdk_bdev_get_max_copy(bdev);
	num_blocks = max_copy_blocks * num_children;
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		src_offset += max_copy_blocks;
	}
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);

		/* One copy request is split into one read and one write request. */
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);

		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
examine_claim_v1(struct spdk_bdev *bdev)
{
	int rc;

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if);
	CU_ASSERT(rc == 0);
}

static void
examine_no_lock_held(struct spdk_bdev *bdev)
{
	CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock));
	CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock));
}

struct examine_claim_v2_ctx {
	struct ut_examine_ctx examine_ctx;
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_desc *desc;
};

static void
examine_claim_v2(struct spdk_bdev *bdev)
{
	struct examine_claim_v2_ctx *ctx = bdev->ctxt;
	int rc;

	rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
	CU_ASSERT(rc == 0);

	rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if);
	CU_ASSERT(rc == 0);
}

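/*
 * Verify that neither the bdev manager spinlock nor the bdev's own spinlock
 * is held while examine_config() and examine_disk() callbacks run, for each
 * of the claim paths: no claim, a v1 claim, and a v2 claim.
 */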
static void
examine_locks(void)
{
	struct spdk_bdev *bdev;
	struct ut_examine_ctx ctx = { 0 };
	struct examine_claim_v2_ctx v2_ctx;

	/* Without any claims, one code path is taken */
	ctx.examine_config = examine_no_lock_held;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise another path that is taken when examine_config() takes a v1 claim. */
	memset(&ctx, 0, sizeof(ctx));
	ctx.examine_config = examine_claim_v1;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if);
	spdk_bdev_module_release_bdev(bdev);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise the final path that comes with v2 claims. */
	memset(&v2_ctx, 0, sizeof(v2_ctx));
	v2_ctx.examine_ctx.examine_config = examine_claim_v2;
	v2_ctx.examine_ctx.examine_disk = examine_no_lock_held;
	v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev_ctx("bdev0", &v2_ctx);
	CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1);
	CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	spdk_bdev_close(v2_ctx.desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	free_bdev(bdev);
}

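/* Assert that a bdev's v2 claim list contains exactly 'expect' claims */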
#define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \
	do { \
		uint32_t len = 0; \
		struct spdk_bdev_module_claim *claim; \
		TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \
			len++; \
		} \
		CU_ASSERT(len == expect); \
	} while (0)

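/*
 * Verify v2 READ_MANY_WRITE_ONE (RWO) claims: a single writer descriptor
 * holds the claim, additional readers may still open the bdev, and no other
 * claim of any kind can be taken until the claim is released. A minimal
 * sketch of the flow exercised below (not itself part of the test):
 *
 *	spdk_bdev_open_ext("bdev0", true, event_cb, NULL, &desc);
 *	spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
 *					 NULL, &module);
 *	...
 *	spdk_bdev_close(desc);	(releases the claim)
 */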
static void
claim_v2_rwo(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above changed the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Closing the first descriptor now allows a new claim and it is promoted to rw. */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->claim != NULL);
	CU_ASSERT(desc2->write);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}

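/*
 * Verify v2 READ_MANY_WRITE_NONE (ROM) claims: only read-only descriptors may
 * claim, several such claims can coexist, writers are blocked, and the claim
 * lasts until the last claiming descriptor is closed.
 */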
static void
claim_v2_rom(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above messed up the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!desc2->write);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Cannot claim with a read-write descriptor */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}

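/*
 * Verify v2 READ_MANY_WRITE_SHARED (RWM) claims: claiming requires a
 * shared_claim_key, additional claimers must present the same key, and each
 * descriptor whose key matches is promoted to read-write.
 */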
static void
claim_v2_rwm(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	char good_key, bad_key;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options should fail */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(desc->claim == NULL);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	opts.shared_claim_key = (uint64_t)&good_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed if the key does not match */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&bad_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* None of the above messed up the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&good_key;
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->write);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim without a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}

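/*
 * Verify that no v2 claim can be taken while two writable descriptors are
 * open: ROM fails with -EINVAL (the claiming descriptor is writable), while
 * RWO and RWM fail with -EPERM (another writer exists).
 */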
7091 memset(&opts, 0, sizeof(opts)); 7092 CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0); 7093 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 7094 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 7095 7096 /* The claim blocks new writers. */ 7097 desc2 = NULL; 7098 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2); 7099 CU_ASSERT(rc == -EPERM); 7100 CU_ASSERT(desc2 == NULL); 7101 7102 /* New readers are allowed */ 7103 desc2 = NULL; 7104 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2); 7105 CU_ASSERT(rc == 0); 7106 CU_ASSERT(desc2 != NULL); 7107 CU_ASSERT(!desc2->write); 7108 7109 /* No new v2 RWO claims are allowed */ 7110 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, 7111 &bdev_ut_if); 7112 CU_ASSERT(rc == -EPERM); 7113 7114 /* No new v2 ROM claims are allowed and the descriptor stays read-only. */ 7115 CU_ASSERT(!desc2->write); 7116 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL, 7117 &bdev_ut_if); 7118 CU_ASSERT(rc == -EPERM); 7119 CU_ASSERT(!desc2->write); 7120 7121 /* No new v1 claims are allowed */ 7122 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 7123 CU_ASSERT(rc == -EPERM); 7124 7125 /* No new v2 RWM claims are allowed if the key does not match */ 7126 spdk_bdev_claim_opts_init(&opts, sizeof(opts)); 7127 opts.shared_claim_key = (uint64_t)&bad_key; 7128 rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts, 7129 &bdev_ut_if); 7130 CU_ASSERT(rc == -EPERM); 7131 CU_ASSERT(!desc2->write); 7132 7133 /* None of the above messed up the existing claim */ 7134 CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim); 7135 UT_ASSERT_CLAIM_V2_COUNT(bdev, 1); 7136 7137 /* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. 
static void
claim_v2_existing_v1(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	}

	spdk_bdev_module_release_bdev(bdev);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}

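/*
 * Verify the converse: a v1 claim fails with -EPERM while any v2 claim type
 * is held and succeeds again once the v2 claim is released.
 */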
static void
claim_v1_existing_v2(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];

		desc = NULL;
		rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
		CU_ASSERT(rc == 0);
		SPDK_CU_ASSERT_FATAL(desc != NULL);

		/* Get a v2 claim */
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == 0);

		/* Fail to get a v1 claim */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);

		spdk_bdev_close(desc);

		/* Now v1 succeeds */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == 0);
		spdk_bdev_module_release_bdev(bdev);
	}

	/* Clean up */
	free_bdev(bdev);
}

static int ut_examine_claimed_init0(void);
static int ut_examine_claimed_init1(void);
static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);

#define UT_MAX_EXAMINE_MODS 2
struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
	{
		.name = "vbdev_ut_examine0",
		.module_init = ut_examine_claimed_init0,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config0,
		.examine_disk = ut_examine_claimed_disk0,
	},
	{
		.name = "vbdev_ut_examine1",
		.module_init = ut_examine_claimed_init1,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config1,
		.examine_disk = ut_examine_claimed_disk1,
	}
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])

struct ut_examine_claimed_ctx {
	uint32_t examine_config_count;
	uint32_t examine_disk_count;

	/* Claim type to take, with these options */
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_claim_opts claim_opts;

	/* Expected return value from spdk_bdev_module_claim_bdev_desc() */
	int expect_claim_err;

	/* Descriptor used for a claim */
	struct spdk_bdev_desc *desc;
} examine_claimed_ctx[UT_MAX_EXAMINE_MODS];

bool ut_testing_examine_claimed;

/*
 * Store the order in which the modules were initialized, since we have no
 * guarantee on the order of execution of the constructors. Modules are
 * examined in reverse order of their initialization.
 */
static int g_ut_examine_claimed_order[UT_MAX_EXAMINE_MODS];

static int
ut_examine_claimed_init(uint32_t modnum)
{
	static int current = UT_MAX_EXAMINE_MODS;

	/* Only do this for the first initialization of the bdev framework */
	if (current == 0) {
		return 0;
	}
	g_ut_examine_claimed_order[modnum] = --current;

	return 0;
}

static int
ut_examine_claimed_init0(void)
{
	return ut_examine_claimed_init(0);
}

static int
ut_examine_claimed_init1(void)
{
	return ut_examine_claimed_init(1);
}

static void
reset_examine_claimed_ctx(void)
{
	struct ut_examine_claimed_ctx *ctx;
	uint32_t i;

	for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
		ctx = &examine_claimed_ctx[i];
		if (ctx->desc != NULL) {
			spdk_bdev_close(ctx->desc);
		}
		memset(ctx, 0, sizeof(*ctx));
		spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
	}
}

static void
examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
	int rc;

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_config_count++;

	if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
		rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts,
					&ctx->desc);
		CU_ASSERT(rc == 0);

		rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module);
		CU_ASSERT(rc == ctx->expect_claim_err);
	}
	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_config0(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, g_ut_examine_claimed_order[0]);
}

static void
ut_examine_claimed_config1(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, g_ut_examine_claimed_order[1]);
}

static void
examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_disk_count++;

	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_disk0(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 0);
}

static void
ut_examine_claimed_disk1(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 1);
}

static bool g_examine_done = false;

static void
ut_examine_done_cb(void *ctx)
{
	g_examine_done = true;
}

static void
examine_claimed_common(bool autoexamine)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_module *mod = examine_claimed_mods;
	struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_auto_examine = autoexamine;
	ut_init_bdev(&bdev_opts);

	ut_testing_examine_claimed = true;
	reset_examine_claimed_ctx();

	/*
	 * With one module claiming, both modules' examine_config should be called, but only the
	 * claiming module's examine_disk should be called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	g_examine_done = false;
	bdev = allocate_bdev("bdev0");

	if (!autoexamine) {
		rc = spdk_bdev_examine("bdev0");
		CU_ASSERT(rc == 0);
		rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL);
		CU_ASSERT(rc == 0);
		CU_ASSERT(!g_examine_done);
		poll_threads();
		CU_ASSERT(g_examine_done);
	}

	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 0);
	CU_ASSERT(ctx[1].desc == NULL);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * With two modules claiming, both modules' examine_config and examine_disk should be
	 * called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	g_examine_done = false;
	bdev = allocate_bdev("bdev0");

	if (!autoexamine) {
		rc = spdk_bdev_examine("bdev0");
		CU_ASSERT(rc == 0);
		rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL);
		CU_ASSERT(rc == 0);
		CU_ASSERT(!g_examine_done);
		poll_threads();
		CU_ASSERT(g_examine_done);
	}

	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * If two vbdev modules try to claim with conflicting claim types, the module that was
	 * added last wins. The winner gets the claim and is the only one that has its
	 * examine_disk callback invoked.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[0].expect_claim_err = -EPERM;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
	g_examine_done = false;
	bdev = allocate_bdev("bdev0");

	if (!autoexamine) {
		rc = spdk_bdev_examine("bdev0");
		CU_ASSERT(rc == 0);
		rc = spdk_bdev_wait_for_examine(ut_examine_done_cb, NULL);
		CU_ASSERT(rc == 0);
		CU_ASSERT(!g_examine_done);
		poll_threads();
		CU_ASSERT(g_examine_done);
	}

	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 0);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	ut_testing_examine_claimed = false;

	ut_fini_bdev();
}

static void
examine_claimed(void)
{
	examine_claimed_common(true);
}

static void
examine_claimed_manual(void)
{
	examine_claimed_common(false);
}

static void
get_numa_id(void)
{
	struct spdk_bdev bdev = {};

	bdev.numa.id = 0;
	bdev.numa.id_valid = 0;
	CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY);

	bdev.numa.id_valid = 1;
	CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == 0);

	bdev.numa.id = SPDK_ENV_NUMA_ID_ANY;
	CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY);
}

static void
get_device_stat_with_reset_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg,
			      int rc)
{
	*(bool *)cb_arg = true;
}

static void
get_device_stat_with_given_reset(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat,
				 enum spdk_bdev_reset_stat_mode mode)
{
	bool done = false;

	spdk_bdev_get_device_stat(bdev, stat, mode, get_device_stat_with_reset_cb, &done);
	while (!done) {
		poll_threads();
	}
}

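/*
 * Verify spdk_bdev_get_device_stat() reset modes: SPDK_BDEV_RESET_STAT_NONE
 * leaves the stats intact, SPDK_BDEV_RESET_STAT_MAXMIN clears only the
 * max/min latency values, and SPDK_BDEV_RESET_STAT_ALL clears everything.
 */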
maxmin stats 7687 */ 7688 get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_MAXMIN); 7689 CU_ASSERT(stat->bytes_read == 4096); 7690 CU_ASSERT(stat->max_read_latency_ticks == 10); 7691 7692 /** 7693 * Check that maxmins stats are reseted after previous step, 7694 * send get request with resetting all stats 7695 */ 7696 get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_ALL); 7697 CU_ASSERT(stat->bytes_read == 4096); 7698 CU_ASSERT(stat->max_read_latency_ticks == 0); 7699 7700 /* Check that all stats are reseted after previous step */ 7701 get_device_stat_with_given_reset(bdev, stat, SPDK_BDEV_RESET_STAT_NONE); 7702 CU_ASSERT(stat->bytes_read == 0); 7703 CU_ASSERT(stat->max_read_latency_ticks == 0); 7704 7705 free(stat); 7706 spdk_put_io_channel(io_ch); 7707 spdk_bdev_close(desc); 7708 free_bdev(bdev); 7709 ut_fini_bdev(); 7710 } 7711 7712 int 7713 main(int argc, char **argv) 7714 { 7715 CU_pSuite suite = NULL; 7716 unsigned int num_failures; 7717 7718 CU_initialize_registry(); 7719 7720 suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown); 7721 7722 CU_ADD_TEST(suite, bytes_to_blocks_test); 7723 CU_ADD_TEST(suite, num_blocks_test); 7724 CU_ADD_TEST(suite, io_valid_test); 7725 CU_ADD_TEST(suite, open_write_test); 7726 CU_ADD_TEST(suite, claim_test); 7727 CU_ADD_TEST(suite, alias_add_del_test); 7728 CU_ADD_TEST(suite, get_device_stat_test); 7729 CU_ADD_TEST(suite, bdev_io_types_test); 7730 CU_ADD_TEST(suite, bdev_io_wait_test); 7731 CU_ADD_TEST(suite, bdev_io_spans_split_test); 7732 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 7733 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 7734 CU_ADD_TEST(suite, bdev_io_mix_split_test); 7735 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 7736 CU_ADD_TEST(suite, bdev_io_write_unit_split_test); 7737 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 7738 CU_ADD_TEST(suite, bdev_io_alignment); 7739 CU_ADD_TEST(suite, bdev_histograms); 7740 CU_ADD_TEST(suite, bdev_write_zeroes); 7741 CU_ADD_TEST(suite, bdev_compare_and_write); 7742 CU_ADD_TEST(suite, bdev_compare); 7743 CU_ADD_TEST(suite, bdev_compare_emulated); 7744 CU_ADD_TEST(suite, bdev_zcopy_write); 7745 CU_ADD_TEST(suite, bdev_zcopy_read); 7746 CU_ADD_TEST(suite, bdev_open_while_hotremove); 7747 CU_ADD_TEST(suite, bdev_close_while_hotremove); 7748 CU_ADD_TEST(suite, bdev_open_ext_test); 7749 CU_ADD_TEST(suite, bdev_open_ext_unregister); 7750 CU_ADD_TEST(suite, bdev_set_io_timeout); 7751 CU_ADD_TEST(suite, bdev_set_qd_sampling); 7752 CU_ADD_TEST(suite, lba_range_overlap); 7753 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 7754 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 7755 CU_ADD_TEST(suite, lock_lba_range_overlapped); 7756 CU_ADD_TEST(suite, bdev_quiesce); 7757 CU_ADD_TEST(suite, bdev_io_abort); 7758 CU_ADD_TEST(suite, bdev_unmap); 7759 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 7760 CU_ADD_TEST(suite, bdev_set_options_test); 7761 CU_ADD_TEST(suite, bdev_get_memory_domains); 7762 CU_ADD_TEST(suite, bdev_io_ext); 7763 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 7764 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 7765 CU_ADD_TEST(suite, bdev_io_ext_split); 7766 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 7767 CU_ADD_TEST(suite, bdev_register_uuid_alias); 7768 CU_ADD_TEST(suite, bdev_unregister_by_name); 7769 CU_ADD_TEST(suite, for_each_bdev_test); 7770 CU_ADD_TEST(suite, bdev_seek_test); 7771 CU_ADD_TEST(suite, bdev_copy); 7772 CU_ADD_TEST(suite, bdev_copy_split_test); 7773 CU_ADD_TEST(suite, examine_locks); 7774 
	CU_ADD_TEST(suite, claim_v2_rwo);
	CU_ADD_TEST(suite, claim_v2_rom);
	CU_ADD_TEST(suite, claim_v2_rwm);
	CU_ADD_TEST(suite, claim_v2_existing_writer);
	CU_ADD_TEST(suite, claim_v2_existing_v1);
	CU_ADD_TEST(suite, claim_v1_existing_v2);
	CU_ADD_TEST(suite, examine_claimed);
	CU_ADD_TEST(suite, examine_claimed_manual);
	CU_ADD_TEST(suite, get_numa_id);
	CU_ADD_TEST(suite, get_device_stat_with_reset);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}