/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			src_offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	struct spdk_bdev_ext_io_opts	*ext_io_opts;
	bool				copy_opts;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};
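/*
 * The stub bdev module below verifies submitted I/O against a FIFO of
 * expectations. A typical test queues one ut_expected_io per child I/O it
 * expects the bdev layer to generate, e.g. (mirroring the pattern used
 * throughout the tests further down):
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *
 * stub_submit_request() then pops the head of the expected_io queue and
 * CU_ASSERTs that the submitted spdk_bdev_io matches it (type, offset,
 * length, iovecs and metadata pointer).
 */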
static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static struct ut_expected_io *
ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->src_offset = src_offset;
	expected_io->length = length;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}
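	/*
	 * ZCOPY is emulated with the g_zcopy_* test buffers: a start request
	 * with populate set hands out the read buffer, a start request without
	 * populate hands out the write buffer, and the matching end request
	 * (commit or not) verifies the iovec is unchanged before dropping the
	 * corresponding buffer.
	 */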
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
		if (bdev_io->u.bdev.ext_opts) {
			CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata);
		}
	}

	if (expected_io->copy_opts) {
		if (expected_io->ext_io_opts) {
			/* opts are not NULL, so they should have been copied */
			CU_ASSERT(expected_io->ext_io_opts != bdev_io->u.bdev.ext_opts);
			CU_ASSERT(bdev_io->u.bdev.ext_opts == &bdev_io->internal.ext_opts_copy);
			/* internal opts always points to the opts passed by the caller */
			CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
		} else {
			/* the opts passed were NULL, so we expect the bdev_io opts to be NULL */
			CU_ASSERT(bdev_io->u.bdev.ext_opts == NULL);
		}
	} else {
		/* opts were not copied, so they should be equal */
		CU_ASSERT(expected_io->ext_io_opts == bdev_io->u.bdev.ext_opts);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
	if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
		CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
	}

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just
		 * return now.
		 */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = bdev_io->internal.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}
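/*
 * Complete up to num_to_complete queued I/Os in FIFO order, using
 * g_io_exp_status as the completion status. Returns how many I/Os were
 * actually completed.
 */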
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA]		= true,
	[SPDK_BDEV_IO_TYPE_COPY]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;
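/*
 * bdev_ut_if registers with async_init set, so module initialization is not
 * considered complete until bdev_ut_module_init() below explicitly calls
 * spdk_bdev_module_init_done().
 */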
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}
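/*
 * Helpers for the tests below: allocate_bdev() registers a 1024-block,
 * 512-byte-block bdev backed by the stub function table, and allocate_vbdev()
 * registers a bdev owned by the vbdev_ut module. Both poll_threads() so that
 * the asynchronous registration/examine callbacks finish before returning.
 */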
static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	spdk_uuid_generate(&bdev->uuid);

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs. This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of its base
	 * bdevs (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.
	 * This models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                 bdev8
	 *                   |
	 *             +----------+
	 *             |          |
	 *           bdev4      bdev5   bdev6   bdev7
	 *             |          |       |       |
	 *         +---+---+      +---+   +   +---+---+
	 *         |       |           \  |  /         \
	 *       bdev0   bdev1          bdev2         bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write. This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write. This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write. This should succeed since it is a leaf
	 * bdev: nothing is stacked on top of it, so it has not been claimed.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write. This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	poll_threads();
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for a closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_spin_init(&bdev.internal.spinlock);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);
	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	spdk_spin_destroy(&bdev.internal.spinlock);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * Since the alias is identical to the name, it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* The alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* The alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* The alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* The alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* The alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias, so this one should fail; a name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty aliases list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty aliases list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
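/*
 * Generic completion callback. For a ZCOPY start request the bdev_io must
 * stay alive until the matching end request, so it is stashed in
 * g_zcopy_bdev_io instead of being freed.
 */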
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}
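/*
 * spdk_bdev_read_blocks() returns -ENOMEM once the bdev_io pool is exhausted.
 * The io-wait pattern (exercised in bdev_io_wait_test below) queues a
 * spdk_bdev_io_wait_entry and resubmits from its callback, io_wait_cb() above,
 * once a bdev_io is freed back to the pool.
 */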
static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* COPY is not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);
	rc = spdk_bdev_copy_blocks(desc, io_ch, 128, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
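/*
 * bdev_io_should_split() is a simple predicate over bdev/bdev_io fields, so
 * the next test drives it directly with stack-allocated structures, without
 * registering a bdev or creating a channel.
 */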
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max size/segment limits set, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to the write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
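	/*
	 * From here on the tests enable separate metadata (md_interleave ==
	 * false, md_len == 8). Each split child is expected to carry md_buf
	 * advanced by (blocks consumed by earlier children) * md_len, e.g.
	 * md_buf + 2 * 8 for a child that starts two blocks into the parent I/O.
	 */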
	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
1423 */ 1424 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1425 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1426 iov[i].iov_len = 512; 1427 } 1428 1429 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1430 g_io_done = false; 1431 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1432 SPDK_BDEV_IO_NUM_CHILD_IOV); 1433 expected_io->md_buf = md_buf; 1434 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1435 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1436 } 1437 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1438 1439 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1440 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 1441 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1442 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1443 ut_expected_io_set_iov(expected_io, i, 1444 (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1445 } 1446 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1447 1448 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1449 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1450 CU_ASSERT(rc == 0); 1451 CU_ASSERT(g_io_done == false); 1452 1453 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1454 stub_complete_io(1); 1455 CU_ASSERT(g_io_done == false); 1456 1457 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1458 stub_complete_io(1); 1459 CU_ASSERT(g_io_done == true); 1460 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1461 1462 /* Test multi vector command that needs to be split by strip and then needs to be 1463 * split further due to the capacity of child iovs. In this case, the length of 1464 * the rest of iovec array with an I/O boundary is the multiple of block size. 1465 */ 1466 1467 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1468 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 
1469 */ 1470 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1471 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1472 iov[i].iov_len = 512; 1473 } 1474 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1475 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1476 iov[i].iov_len = 256; 1477 } 1478 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1479 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1480 1481 /* Add an extra iovec to trigger split */ 1482 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1483 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1484 1485 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1486 g_io_done = false; 1487 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1488 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV); 1489 expected_io->md_buf = md_buf; 1490 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1491 ut_expected_io_set_iov(expected_io, i, 1492 (void *)((i + 1) * 0x10000), 512); 1493 } 1494 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1495 ut_expected_io_set_iov(expected_io, i, 1496 (void *)((i + 1) * 0x10000), 256); 1497 } 1498 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1499 1500 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1501 1, 1); 1502 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1503 ut_expected_io_set_iov(expected_io, 0, 1504 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1505 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1506 1507 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1508 1, 1); 1509 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1510 ut_expected_io_set_iov(expected_io, 0, 1511 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1512 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1513 1514 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1515 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1516 CU_ASSERT(rc == 0); 1517 CU_ASSERT(g_io_done == false); 1518 1519 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1520 stub_complete_io(1); 1521 CU_ASSERT(g_io_done == false); 1522 1523 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1524 stub_complete_io(2); 1525 CU_ASSERT(g_io_done == true); 1526 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1527 1528 /* Test multi vector command that needs to be split by strip and then needs to be 1529 * split further due to the capacity of child iovs, the child request offset should 1530 * be rewind to last aligned offset and go success without error. 
1531 */ 1532 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1533 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1534 iov[i].iov_len = 512; 1535 } 1536 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1537 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1538 1539 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1540 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1541 1542 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1543 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1544 1545 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1546 g_io_done = false; 1547 g_io_status = 0; 1548 /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */ 1549 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1550 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1551 expected_io->md_buf = md_buf; 1552 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1553 ut_expected_io_set_iov(expected_io, i, 1554 (void *)((i + 1) * 0x10000), 512); 1555 } 1556 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1557 /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */ 1558 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1559 1, 2); 1560 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1561 ut_expected_io_set_iov(expected_io, 0, 1562 (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1563 ut_expected_io_set_iov(expected_io, 1, 1564 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1565 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1566 /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */ 1567 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1568 1, 1); 1569 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1570 ut_expected_io_set_iov(expected_io, 0, 1571 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1572 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1573 1574 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1575 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1576 CU_ASSERT(rc == 0); 1577 CU_ASSERT(g_io_done == false); 1578 1579 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1580 stub_complete_io(1); 1581 CU_ASSERT(g_io_done == false); 1582 1583 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1584 stub_complete_io(2); 1585 CU_ASSERT(g_io_done == true); 1586 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1587 1588 /* Test multi vector command that needs to be split due to the IO boundary and 1589 * the capacity of child iovs. Especially test the case when the command is 1590 * split due to the capacity of child iovs, the tail address is not aligned with 1591 * block size and is rewinded to the aligned address. 1592 * 1593 * The iovecs used in read request is complex but is based on the data 1594 * collected in the real issue. We change the base addresses but keep the lengths 1595 * not to loose the credibility of the test. 
1596 */ 1597 bdev->optimal_io_boundary = 128; 1598 g_io_done = false; 1599 g_io_status = 0; 1600 1601 for (i = 0; i < 31; i++) { 1602 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1603 iov[i].iov_len = 1024; 1604 } 1605 iov[31].iov_base = (void *)0xFEED1F00000; 1606 iov[31].iov_len = 32768; 1607 iov[32].iov_base = (void *)0xFEED2000000; 1608 iov[32].iov_len = 160; 1609 iov[33].iov_base = (void *)0xFEED2100000; 1610 iov[33].iov_len = 4096; 1611 iov[34].iov_base = (void *)0xFEED2200000; 1612 iov[34].iov_len = 4096; 1613 iov[35].iov_base = (void *)0xFEED2300000; 1614 iov[35].iov_len = 4096; 1615 iov[36].iov_base = (void *)0xFEED2400000; 1616 iov[36].iov_len = 4096; 1617 iov[37].iov_base = (void *)0xFEED2500000; 1618 iov[37].iov_len = 4096; 1619 iov[38].iov_base = (void *)0xFEED2600000; 1620 iov[38].iov_len = 4096; 1621 iov[39].iov_base = (void *)0xFEED2700000; 1622 iov[39].iov_len = 4096; 1623 iov[40].iov_base = (void *)0xFEED2800000; 1624 iov[40].iov_len = 4096; 1625 iov[41].iov_base = (void *)0xFEED2900000; 1626 iov[41].iov_len = 4096; 1627 iov[42].iov_base = (void *)0xFEED2A00000; 1628 iov[42].iov_len = 4096; 1629 iov[43].iov_base = (void *)0xFEED2B00000; 1630 iov[43].iov_len = 12288; 1631 iov[44].iov_base = (void *)0xFEED2C00000; 1632 iov[44].iov_len = 8192; 1633 iov[45].iov_base = (void *)0xFEED2F00000; 1634 iov[45].iov_len = 4096; 1635 iov[46].iov_base = (void *)0xFEED3000000; 1636 iov[46].iov_len = 4096; 1637 iov[47].iov_base = (void *)0xFEED3100000; 1638 iov[47].iov_len = 4096; 1639 iov[48].iov_base = (void *)0xFEED3200000; 1640 iov[48].iov_len = 24576; 1641 iov[49].iov_base = (void *)0xFEED3300000; 1642 iov[49].iov_len = 16384; 1643 iov[50].iov_base = (void *)0xFEED3400000; 1644 iov[50].iov_len = 12288; 1645 iov[51].iov_base = (void *)0xFEED3500000; 1646 iov[51].iov_len = 4096; 1647 iov[52].iov_base = (void *)0xFEED3600000; 1648 iov[52].iov_len = 4096; 1649 iov[53].iov_base = (void *)0xFEED3700000; 1650 iov[53].iov_len = 4096; 1651 iov[54].iov_base = (void *)0xFEED3800000; 1652 iov[54].iov_len = 28672; 1653 iov[55].iov_base = (void *)0xFEED3900000; 1654 iov[55].iov_len = 20480; 1655 iov[56].iov_base = (void *)0xFEED3A00000; 1656 iov[56].iov_len = 4096; 1657 iov[57].iov_base = (void *)0xFEED3B00000; 1658 iov[57].iov_len = 12288; 1659 iov[58].iov_base = (void *)0xFEED3C00000; 1660 iov[58].iov_len = 4096; 1661 iov[59].iov_base = (void *)0xFEED3D00000; 1662 iov[59].iov_len = 4096; 1663 iov[60].iov_base = (void *)0xFEED3E00000; 1664 iov[60].iov_len = 352; 1665 1666 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1667 * of child iovs, 1668 */ 1669 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1670 expected_io->md_buf = md_buf; 1671 for (i = 0; i < 32; i++) { 1672 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1673 } 1674 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1675 1676 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1677 * split by the IO boundary requirement. 
1678 */ 1679 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1680 expected_io->md_buf = md_buf + 126 * 8; 1681 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1682 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1683 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1684 1685 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1686 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1687 */ 1688 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1689 expected_io->md_buf = md_buf + 128 * 8; 1690 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1691 iov[33].iov_len - 864); 1692 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1693 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1694 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1695 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1696 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1697 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1698 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1699 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1700 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1701 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1702 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1703 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1704 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1705 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1706 1707 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1708 * first 864 bytes of iov[52] split by the IO boundary requirement. 1709 */ 1710 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1711 expected_io->md_buf = md_buf + 256 * 8; 1712 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1713 iov[46].iov_len - 864); 1714 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1715 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1716 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1717 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1718 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1719 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1720 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1721 1722 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1723 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 
1724 */ 1725 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1726 expected_io->md_buf = md_buf + 384 * 8; 1727 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1728 iov[52].iov_len - 864); 1729 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1730 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1731 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1732 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1733 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1734 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1735 1736 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1737 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1738 */ 1739 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1740 expected_io->md_buf = md_buf + 512 * 8; 1741 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1742 iov[57].iov_len - 4960); 1743 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1744 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1745 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1746 1747 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1748 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1749 expected_io->md_buf = md_buf + 542 * 8; 1750 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1751 iov[59].iov_len - 3936); 1752 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1753 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1754 1755 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1756 0, 543, io_done, NULL); 1757 CU_ASSERT(rc == 0); 1758 CU_ASSERT(g_io_done == false); 1759 1760 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1761 stub_complete_io(1); 1762 CU_ASSERT(g_io_done == false); 1763 1764 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1765 stub_complete_io(5); 1766 CU_ASSERT(g_io_done == false); 1767 1768 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1769 stub_complete_io(1); 1770 CU_ASSERT(g_io_done == true); 1771 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1772 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1773 1774 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1775 * split, so test that. 1776 */ 1777 bdev->optimal_io_boundary = 15; 1778 g_io_done = false; 1779 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1780 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1781 1782 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1783 CU_ASSERT(rc == 0); 1784 CU_ASSERT(g_io_done == false); 1785 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1786 stub_complete_io(1); 1787 CU_ASSERT(g_io_done == true); 1788 1789 /* Test an UNMAP. This should also not be split. 
        bdev->optimal_io_boundary = 16;
        g_io_done = false;
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);

        /* Test a FLUSH. This should also not be split. */
        bdev->optimal_io_boundary = 16;
        g_io_done = false;
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);

        /* Test a COPY. This should also not be split. */
        bdev->optimal_io_boundary = 15;
        g_io_done = false;
        expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);

        CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

        /* Child requests return an error status */
        bdev->optimal_io_boundary = 16;
        iov[0].iov_base = (void *)0x10000;
        iov[0].iov_len = 512 * 64;
        g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
        g_io_done = false;
        g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

        rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
        stub_complete_io(4);
        CU_ASSERT(g_io_done == false);
        CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

        /* Test that a multi-vector command is terminated with failure, before continuing
         * the splitting process, when one of its child I/Os fails.
         * The multi-vector command is the same as the one above: it needs to be split by
         * strip and then needs to be split further due to the capacity of child iovs.
         */
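        /* The first child IO is forced to fail; the parent must then complete with
         * failure without submitting the remainder of the split.
         */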
        for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
                iov[i].iov_base = (void *)((i + 1) * 0x10000);
                iov[i].iov_len = 512;
        }
        iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
        iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

        iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
        iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

        iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
        iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

        bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;

        g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
        g_io_done = false;
        g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

        rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
                                    SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);

        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

        g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

        /* For this test we will create the following conditions to hit the code path where
         * we are trying to send an IO following a split that has no iovs because we had to
         * trim them for alignment reasons.
         *
         * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
         * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
         *   position 30 and overshoot by 0x2e.
         * - That means we'll send the IO and loop back to pick up the remaining bytes at
         *   child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
         *   which eliminates that vector, so we just send the first split IO with 31 vectors
         *   (the last one shortened) and let the completion pick up the last 2 vectors.
         */
        bdev->optimal_io_boundary = 32;
        bdev->split_on_optimal_io_boundary = true;
        g_io_done = false;

        /* Init all parent IOVs to 0x212 */
        for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
                iov[i].iov_base = (void *)((i + 1) * 0x10000);
                iov[i].iov_len = 0x212;
        }

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
                                           SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
        /* expect 0-29 to be 1:1 with the parent iov */
        for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
                ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
        }

        /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
         * where 0x2e is the amount we overshot the 16K boundary
         */
        ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
                               (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The 2nd child IO will have 2 remaining vectors, one to pick up from the one that
         * was shortened, taking it to the next boundary, and then a final one to get us to
         * 0x4200 bytes for the IO.
         */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
                                           SPDK_BDEV_IO_NUM_CHILD_IOV, 2);
        /* position 30 picked up the remaining bytes to the next boundary */
        ut_expected_io_set_iov(expected_io, 0,
                               (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

        /* position 31 picked up the rest of the transfer to get us to 0x4200 */
        ut_expected_io_set_iov(expected_io, 1,
                               (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
                                    SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);

        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == false);

        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

        spdk_put_io_channel(io_ch);
        spdk_bdev_close(desc);
        free_bdev(bdev);
        spdk_bdev_finish(bdev_fini_cb, NULL);
        poll_threads();
}

static void
bdev_io_max_size_and_segment_split_test(void)
{
        struct spdk_bdev *bdev;
        struct spdk_bdev_desc *desc = NULL;
        struct spdk_io_channel *io_ch;
        struct spdk_bdev_opts bdev_opts = {};
        struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
        struct ut_expected_io *expected_io;
        uint64_t i;
        int rc;

        spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
        bdev_opts.bdev_io_pool_size = 512;
        bdev_opts.bdev_io_cache_size = 64;

        bdev_opts.opts_size = sizeof(bdev_opts);
        rc = spdk_bdev_set_opts(&bdev_opts);
        CU_ASSERT(rc == 0);
        spdk_bdev_initialize(bdev_init_cb, NULL);

        bdev = allocate_bdev("bdev0");

        rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
        CU_ASSERT(rc == 0);
        SPDK_CU_ASSERT_FATAL(desc != NULL);
        io_ch = spdk_bdev_get_io_channel(desc);
        CU_ASSERT(io_ch != NULL);

        bdev->split_on_optimal_io_boundary = false;
        bdev->optimal_io_boundary = 0;

        /* Case 0: max_num_segments == 0,
         * but the segment size of 2 * 512 exceeds max_segment_size of 512.
         */
        bdev->max_segment_size = 512;
        bdev->max_num_segments = 0;
        g_io_done = false;

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
        ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
        ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);

        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

        /* Case 1: max_segment_size == 0,
         * but the iov count of 2 exceeds max_num_segments of 1.
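         * (With max_num_segments = 1 each child IO carries exactly one iov: first
         * iov[0] as a single block at offset 14, then iov[1] as 8 blocks at offset 15.)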
2011 */ 2012 bdev->max_segment_size = 0; 2013 bdev->max_num_segments = 1; 2014 g_io_done = false; 2015 2016 iov[0].iov_base = (void *)0x10000; 2017 iov[0].iov_len = 512; 2018 iov[1].iov_base = (void *)0x20000; 2019 iov[1].iov_len = 8 * 512; 2020 2021 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2022 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2023 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2024 2025 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2026 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2027 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2028 2029 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2030 CU_ASSERT(rc == 0); 2031 CU_ASSERT(g_io_done == false); 2032 2033 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2034 stub_complete_io(2); 2035 CU_ASSERT(g_io_done == true); 2036 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2037 2038 /* Test that a non-vector command is split correctly. 2039 * Set up the expected values before calling spdk_bdev_read_blocks 2040 */ 2041 bdev->max_segment_size = 512; 2042 bdev->max_num_segments = 1; 2043 g_io_done = false; 2044 2045 /* Child IO 0 */ 2046 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2047 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2048 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2049 2050 /* Child IO 1 */ 2051 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2052 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2053 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2054 2055 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2056 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2057 CU_ASSERT(rc == 0); 2058 CU_ASSERT(g_io_done == false); 2059 2060 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2061 stub_complete_io(2); 2062 CU_ASSERT(g_io_done == true); 2063 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2064 2065 /* Now set up a more complex, multi-vector command that needs to be split, 2066 * including splitting iovecs. 
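 * (With max_segment_size = 2 * 512 and max_num_segments = 1 below, the 12-block
 * write becomes six 2-block children, each carrying a single iov.)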
2067 */ 2068 bdev->max_segment_size = 2 * 512; 2069 bdev->max_num_segments = 1; 2070 g_io_done = false; 2071 2072 iov[0].iov_base = (void *)0x10000; 2073 iov[0].iov_len = 2 * 512; 2074 iov[1].iov_base = (void *)0x20000; 2075 iov[1].iov_len = 4 * 512; 2076 iov[2].iov_base = (void *)0x30000; 2077 iov[2].iov_len = 6 * 512; 2078 2079 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2080 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2081 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2082 2083 /* Split iov[1].size to 2 iov entries then split the segments */ 2084 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2085 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2086 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2087 2088 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2089 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2090 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2091 2092 /* Split iov[2].size to 3 iov entries then split the segments */ 2093 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2094 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2095 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2096 2097 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2098 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2099 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2100 2101 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2102 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2103 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2104 2105 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2106 CU_ASSERT(rc == 0); 2107 CU_ASSERT(g_io_done == false); 2108 2109 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2110 stub_complete_io(6); 2111 CU_ASSERT(g_io_done == true); 2112 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2113 2114 /* Test multi vector command that needs to be split by strip and then needs to be 2115 * split further due to the capacity of parent IO child iovs. 2116 */ 2117 bdev->max_segment_size = 512; 2118 bdev->max_num_segments = 1; 2119 g_io_done = false; 2120 2121 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 2122 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2123 iov[i].iov_len = 512 * 2; 2124 } 2125 2126 /* Each input iov.size is split into 2 iovs, 2127 * half of the input iov can fill all child iov entries of a single IO. 
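 * (Each 1024-byte parent iov becomes two 512-byte single-block children, so 16
 * of the 32 parent iovs exhaust the 32 child iov slots, and each split round
 * submits SPDK_BDEV_IO_NUM_CHILD_IOV child IOs.)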
 */
        for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
                ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
                ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
        }

        /* The remaining iovs are split in the second round */
        for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
                ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
                ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
        }

        rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
                                    SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);

        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
        stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
        CU_ASSERT(g_io_done == false);

        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
        stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

        /* An error case: a child IO produced by splitting is not a multiple of the
         * block size, so the request exits with an error.
         */
        bdev->max_segment_size = 512;
        bdev->max_num_segments = 1;
        g_io_done = false;

        iov[0].iov_base = (void *)0x10000;
        iov[0].iov_len = 512 + 256;
        iov[1].iov_base = (void *)0x20000;
        iov[1].iov_len = 256;

        /* iov[0] is split into 512 and 256 bytes. The 256-byte tail is less than one
         * block, and the next split round finds that this first child IO is smaller
         * than the block size, so splitting fails with an error.
         */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
        ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);

        /* First child IO is OK */
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

        /* error exit */
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

        /* Test a multi-vector command that needs to be split by max_segment_size and
         * then needs to be split further due to the capacity of child iovs.
         *
         * In this case, the last two iovs need to be split, but that would exceed the
         * capacity of child iovs, so they need to wait until the first batch completes.
         */
        bdev->max_segment_size = 512;
        bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
        g_io_done = false;

        for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
                iov[i].iov_base = (void *)((i + 1) * 0x10000);
                iov[i].iov_len = 512;
        }
        for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
                iov[i].iov_base = (void *)((i + 1) * 0x10000);
                iov[i].iov_len = 512 * 2;
        }

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
                                           SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
        /* The first (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) iovs will not be split */
        for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
                ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
        }
        /* iov (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
        ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
        ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The split entries of the last iov exceed the parent IO's child iov capacity,
         * so they are split off into the next round.
         */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
        ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
        ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
                                    SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);

        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == false);

        /* Next round */
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

        /* This case is similar to the previous one, but the IO composed of the last few
         * child iov entries is smaller than a blocklen, so those entries cannot be put
         * into this IO and have to wait for the next one.
         */
        bdev->max_segment_size = 512;
        bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
        g_io_done = false;

        for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
                iov[i].iov_base = (void *)((i + 1) * 0x10000);
                iov[i].iov_len = 512;
        }

        for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
                iov[i].iov_base = (void *)((i + 1) * 0x10000);
                iov[i].iov_len = 128;
        }

        /* The first child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but
         * SPDK_BDEV_IO_NUM_CHILD_IOV - 2, because the remaining 2 iovs are not
         * enough for a blocklen.
         */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
                                           SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
        for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
                ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
        }
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The second child IO waits until the first child IO completes, because the
         * combined iovcnt of the two IOs exceeds the child iov capacity of the parent IO:
2283 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2 2284 */ 2285 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2, 2286 1, 4); 2287 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2288 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2289 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2290 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2291 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2292 2293 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0, 2294 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2295 CU_ASSERT(rc == 0); 2296 CU_ASSERT(g_io_done == false); 2297 2298 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2299 stub_complete_io(1); 2300 CU_ASSERT(g_io_done == false); 2301 2302 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2303 stub_complete_io(1); 2304 CU_ASSERT(g_io_done == true); 2305 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2306 2307 /* A very complicated case. Each sg entry exceeds max_segment_size and 2308 * needs to be split. At the same time, child io must be a multiple of blocklen. 2309 * At the same time, child iovcnt exceeds parent iovcnt. 2310 */ 2311 bdev->max_segment_size = 512 + 128; 2312 bdev->max_num_segments = 3; 2313 g_io_done = false; 2314 2315 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2316 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2317 iov[i].iov_len = 512 + 256; 2318 } 2319 2320 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2321 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2322 iov[i].iov_len = 512 + 128; 2323 } 2324 2325 /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries. 2326 * Consume 4 parent IO iov entries per for() round and 6 block size. 2327 * Generate 9 child IOs. 2328 */ 2329 for (i = 0; i < 3; i++) { 2330 uint32_t j = i * 4; 2331 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2332 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2333 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2334 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2335 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2336 2337 /* Child io must be a multiple of blocklen 2338 * iov[j + 2] must be split. If the third entry is also added, 2339 * the multiple of blocklen cannot be guaranteed. But it still 2340 * occupies one iov entry of the parent child iov. 
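         * Each for() round therefore consumes 3 + 2 + 3 = 8 populated child iov entries
         * plus that one reserved entry, i.e. the 9 entries per round noted above.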
                 */
                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
                ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
                ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
                ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
                ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
                ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
        }

        /* Child iov position at 27, the 10th child IO;
         * its iov entry index is 3 * 4 and its block offset is 3 * 6.
         */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
        ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
        ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* Child iov position at 30, the 11th child IO */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
        ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
        ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The 2nd split round, iovpos is 0 again: the 12th child IO */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
        ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
        ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* Generate another 9 child IOs consuming 27 child iov entries:
         * again 4 parent iov entries and 6 blocks per for() round.
         * The parent iov index starts from 16 and the block offset starts from 24.
         */
        for (i = 0; i < 3; i++) {
                uint32_t j = i * 4 + 16;
                uint32_t offset = i * 6 + 24;
                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
                ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
                ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
                ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

                /* The child IO must be a multiple of blocklen, so iov[j + 2] must be split.
                 * If the third entry were added as well, a multiple of blocklen could not be
                 * guaranteed, but the dropped entry still occupies one slot of the parent's
                 * child iov array.
                 */
                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
                ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
                ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

                expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
                ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
                ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
                ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
                TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
        }

        /* The 22nd child IO, child iov position at 30 */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
        ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The third round */
        /* Here is the 23rd child IO and the child iovpos is 0 */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
        ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
        ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The 24th child IO */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
        ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
        ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* The 25th child IO */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
        ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
        ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
                                    50, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);

        /* The parent IO supports up to 32 child iovs, so at most 11 child IOs can be
         * split at a time; the splitting continues after the first batch is over.
         */
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
        stub_complete_io(11);
        CU_ASSERT(g_io_done == false);

        /* The 2nd round */
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
        stub_complete_io(11);
        CU_ASSERT(g_io_done == false);

        /* The last round */
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
        stub_complete_io(3);
        CU_ASSERT(g_io_done == true);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

        /* Test a WRITE_ZEROES. This should also not be split. */
        bdev->max_segment_size = 512;
        bdev->max_num_segments = 1;
        g_io_done = false;

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);

        /* Test an UNMAP. This should also not be split. */
        g_io_done = false;

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);

        /* Test a FLUSH. This should also not be split. */
        g_io_done = false;

        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);

        /* Test a COPY. This should also not be split. */
        g_io_done = false;

        expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
        CU_ASSERT(rc == 0);
        CU_ASSERT(g_io_done == false);
        CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
        stub_complete_io(1);
        CU_ASSERT(g_io_done == true);

        spdk_put_io_channel(io_ch);
        spdk_bdev_close(desc);
        free_bdev(bdev);
        spdk_bdev_finish(bdev_fini_cb, NULL);
        poll_threads();
}

static void
bdev_io_mix_split_test(void)
{
        struct spdk_bdev *bdev;
        struct spdk_bdev_desc *desc = NULL;
        struct spdk_io_channel *io_ch;
        struct spdk_bdev_opts bdev_opts = {};
        struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
        struct ut_expected_io *expected_io;
        uint64_t i;
        int rc;

        spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
        bdev_opts.bdev_io_pool_size = 512;
        bdev_opts.bdev_io_cache_size = 64;

        rc = spdk_bdev_set_opts(&bdev_opts);
        CU_ASSERT(rc == 0);
        spdk_bdev_initialize(bdev_init_cb, NULL);

        bdev = allocate_bdev("bdev0");

        rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
        CU_ASSERT(rc == 0);
        SPDK_CU_ASSERT_FATAL(desc != NULL);
        io_ch = spdk_bdev_get_io_channel(desc);
        CU_ASSERT(io_ch != NULL);

        /* First case: optimal_io_boundary == max_segment_size * max_num_segments */
        bdev->split_on_optimal_io_boundary = true;
        bdev->optimal_io_boundary = 16;

        bdev->max_segment_size = 512;
        bdev->max_num_segments = 16;
        g_io_done = false;

        /* An IO crossing the IO boundary requires a split.
         * Total 2 child IOs.
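         * (The 4-block read at offset 14 crosses the boundary at block 16; each
         * 2-block child is then split into two 512-byte segments by max_segment_size.)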
2555 */ 2556 2557 /* The 1st child IO split the segment_size to multiple segment entry */ 2558 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2559 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2560 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2561 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2562 2563 /* The 2nd child IO split the segment_size to multiple segment entry */ 2564 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2565 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2566 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2567 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2568 2569 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2570 CU_ASSERT(rc == 0); 2571 CU_ASSERT(g_io_done == false); 2572 2573 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2574 stub_complete_io(2); 2575 CU_ASSERT(g_io_done == true); 2576 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2577 2578 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2579 bdev->max_segment_size = 15 * 512; 2580 bdev->max_num_segments = 1; 2581 g_io_done = false; 2582 2583 /* IO crossing the IO boundary requires split. 2584 * The 1st child IO segment size exceeds the max_segment_size, 2585 * So 1st child IO will be split to multiple segment entry. 2586 * Then it split to 2 child IOs because of the max_num_segments. 2587 * Total 3 child IOs. 2588 */ 2589 2590 /* The first 2 IOs are in an IO boundary. 2591 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2592 * So it split to the first 2 IOs. 2593 */ 2594 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2595 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2596 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2597 2598 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2599 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2600 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2601 2602 /* The 3rd Child IO is because of the io boundary */ 2603 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2604 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2605 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2606 2607 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2608 CU_ASSERT(rc == 0); 2609 CU_ASSERT(g_io_done == false); 2610 2611 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2612 stub_complete_io(3); 2613 CU_ASSERT(g_io_done == true); 2614 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2615 2616 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2617 bdev->max_segment_size = 17 * 512; 2618 bdev->max_num_segments = 1; 2619 g_io_done = false; 2620 2621 /* IO crossing the IO boundary requires split. 2622 * Child IO does not split. 2623 * Total 2 child IOs. 
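 * (A max_segment_size of 17 * 512 covers a whole 16-block boundary span, so each
 * child fits in a single iov: first 16 blocks, then the remaining 2 blocks.)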
2624 */ 2625 2626 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2627 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2628 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2629 2630 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2631 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2632 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2633 2634 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2635 CU_ASSERT(rc == 0); 2636 CU_ASSERT(g_io_done == false); 2637 2638 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2639 stub_complete_io(2); 2640 CU_ASSERT(g_io_done == true); 2641 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2642 2643 /* Now set up a more complex, multi-vector command that needs to be split, 2644 * including splitting iovecs. 2645 * optimal_io_boundary < max_segment_size * max_num_segments 2646 */ 2647 bdev->max_segment_size = 3 * 512; 2648 bdev->max_num_segments = 6; 2649 g_io_done = false; 2650 2651 iov[0].iov_base = (void *)0x10000; 2652 iov[0].iov_len = 4 * 512; 2653 iov[1].iov_base = (void *)0x20000; 2654 iov[1].iov_len = 4 * 512; 2655 iov[2].iov_base = (void *)0x30000; 2656 iov[2].iov_len = 10 * 512; 2657 2658 /* IO crossing the IO boundary requires split. 2659 * The 1st child IO segment size exceeds the max_segment_size and after 2660 * splitting segment_size, the num_segments exceeds max_num_segments. 2661 * So 1st child IO will be split to 2 child IOs. 2662 * Total 3 child IOs. 2663 */ 2664 2665 /* The first 2 IOs are in an IO boundary. 2666 * After splitting segment size the segment num exceeds. 2667 * So it splits to 2 child IOs. 2668 */ 2669 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2670 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2671 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2672 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2673 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2674 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2675 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2676 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2677 2678 /* The 2nd child IO has the left segment entry */ 2679 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2680 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2681 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2682 2683 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2684 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2685 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2686 2687 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2688 CU_ASSERT(rc == 0); 2689 CU_ASSERT(g_io_done == false); 2690 2691 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2692 stub_complete_io(3); 2693 CU_ASSERT(g_io_done == true); 2694 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2695 2696 /* A very complicated case. Each sg entry exceeds max_segment_size 2697 * and split on io boundary. 
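 * (Each 4 * 512 = 2048-byte sg entry exceeds max_segment_size = 3 * 512, and the
 * 16-block boundary of 8K is far below max_segment_size * max_num_segments = 48K.)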
         * optimal_io_boundary < max_segment_size * max_num_segments
         */
        bdev->max_segment_size = 3 * 512;
        bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
        g_io_done = false;

        for (i = 0; i < 20; i++) {
                iov[i].iov_base = (void *)((i + 1) * 0x10000);
                iov[i].iov_len = 512 * 4;
        }

        /* An IO crossing the IO boundary requires a split.
         * The 80-block length is split into 5 child IOs based on the offset and the IO boundary.
         * Each iov entry needs to be split into 2 entries because of max_segment_size.
         * Total 5 child IOs.
         */

        /* 4 iov entries fit in one IO boundary and each iov entry splits into 2,
         * so each child IO occupies 8 child iov entries.
         */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
        for (i = 0; i < 4; i++) {
                int iovcnt = i * 2;
                ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
                ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
        }
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* 2nd child IO; 16 child iov entries of the parent IO consumed in total */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
        for (i = 4; i < 8; i++) {
                int iovcnt = (i - 4) * 2;
                ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
                ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
        }
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* 3rd child IO; 24 child iov entries of the parent IO consumed in total */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
        for (i = 8; i < 12; i++) {
                int iovcnt = (i - 8) * 2;
                ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
                ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
        }
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* 4th child IO; all 32 child iov entries of the parent IO consumed */
        expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
        for (i = 12; i < 16; i++) {
                int iovcnt = (i - 12) * 2;
                ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
                ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
        }
        TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

        /* 5th child IO; because of the child iov entry capacity it is split off
         * into the next round.
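         * (The first four children consumed all 32 child iov slots, 4 children x 8
         * entries each, so the 5th child has to wait for the second split round.)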
2755 */ 2756 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2757 for (i = 16; i < 20; i++) { 2758 int iovcnt = (i - 16) * 2; 2759 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2760 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2761 } 2762 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2763 2764 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2765 CU_ASSERT(rc == 0); 2766 CU_ASSERT(g_io_done == false); 2767 2768 /* First split round */ 2769 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2770 stub_complete_io(4); 2771 CU_ASSERT(g_io_done == false); 2772 2773 /* Second split round */ 2774 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2775 stub_complete_io(1); 2776 CU_ASSERT(g_io_done == true); 2777 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2778 2779 spdk_put_io_channel(io_ch); 2780 spdk_bdev_close(desc); 2781 free_bdev(bdev); 2782 spdk_bdev_finish(bdev_fini_cb, NULL); 2783 poll_threads(); 2784 } 2785 2786 static void 2787 bdev_io_split_with_io_wait(void) 2788 { 2789 struct spdk_bdev *bdev; 2790 struct spdk_bdev_desc *desc = NULL; 2791 struct spdk_io_channel *io_ch; 2792 struct spdk_bdev_channel *channel; 2793 struct spdk_bdev_mgmt_channel *mgmt_ch; 2794 struct spdk_bdev_opts bdev_opts = {}; 2795 struct iovec iov[3]; 2796 struct ut_expected_io *expected_io; 2797 int rc; 2798 2799 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2800 bdev_opts.bdev_io_pool_size = 2; 2801 bdev_opts.bdev_io_cache_size = 1; 2802 2803 rc = spdk_bdev_set_opts(&bdev_opts); 2804 CU_ASSERT(rc == 0); 2805 spdk_bdev_initialize(bdev_init_cb, NULL); 2806 2807 bdev = allocate_bdev("bdev0"); 2808 2809 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2810 CU_ASSERT(rc == 0); 2811 CU_ASSERT(desc != NULL); 2812 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2813 io_ch = spdk_bdev_get_io_channel(desc); 2814 CU_ASSERT(io_ch != NULL); 2815 channel = spdk_io_channel_get_ctx(io_ch); 2816 mgmt_ch = channel->shared_resource->mgmt_ch; 2817 2818 bdev->optimal_io_boundary = 16; 2819 bdev->split_on_optimal_io_boundary = true; 2820 2821 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2822 CU_ASSERT(rc == 0); 2823 2824 /* Now test that a single-vector command is split correctly. 2825 * Offset 14, length 8, payload 0xF000 2826 * Child - Offset 14, length 2, payload 0xF000 2827 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2828 * 2829 * Set up the expected values before calling spdk_bdev_read_blocks 2830 */ 2831 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2832 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2833 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2834 2835 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2836 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2837 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2838 2839 /* The following children will be submitted sequentially due to the capacity of 2840 * spdk_bdev_io. 
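 * (bdev_io_pool_size is only 2 and one spdk_bdev_io is already held by the
 * outstanding read, so each child must wait for a previously submitted
 * spdk_bdev_io to be freed before it can be submitted.)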
2841 */ 2842 2843 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2844 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2845 CU_ASSERT(rc == 0); 2846 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2847 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2848 2849 /* Completing the first read I/O will submit the first child */ 2850 stub_complete_io(1); 2851 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2852 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2853 2854 /* Completing the first child will submit the second child */ 2855 stub_complete_io(1); 2856 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2857 2858 /* Complete the second child I/O. This should result in our callback getting 2859 * invoked since the parent I/O is now complete. 2860 */ 2861 stub_complete_io(1); 2862 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2863 2864 /* Now set up a more complex, multi-vector command that needs to be split, 2865 * including splitting iovecs. 2866 */ 2867 iov[0].iov_base = (void *)0x10000; 2868 iov[0].iov_len = 512; 2869 iov[1].iov_base = (void *)0x20000; 2870 iov[1].iov_len = 20 * 512; 2871 iov[2].iov_base = (void *)0x30000; 2872 iov[2].iov_len = 11 * 512; 2873 2874 g_io_done = false; 2875 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2876 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2877 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2878 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2879 2880 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2881 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2882 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2883 2884 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2885 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2886 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2887 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2888 2889 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2890 CU_ASSERT(rc == 0); 2891 CU_ASSERT(g_io_done == false); 2892 2893 /* The following children will be submitted sequentially due to the capacity of 2894 * spdk_bdev_io. 2895 */ 2896 2897 /* Completing the first child will submit the second child */ 2898 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2899 stub_complete_io(1); 2900 CU_ASSERT(g_io_done == false); 2901 2902 /* Completing the second child will submit the third child */ 2903 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2904 stub_complete_io(1); 2905 CU_ASSERT(g_io_done == false); 2906 2907 /* Completing the third child will result in our callback getting invoked 2908 * since the parent I/O is now complete. 
2909 */ 2910 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2911 stub_complete_io(1); 2912 CU_ASSERT(g_io_done == true); 2913 2914 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2915 2916 spdk_put_io_channel(io_ch); 2917 spdk_bdev_close(desc); 2918 free_bdev(bdev); 2919 spdk_bdev_finish(bdev_fini_cb, NULL); 2920 poll_threads(); 2921 } 2922 2923 static void 2924 bdev_io_write_unit_split_test(void) 2925 { 2926 struct spdk_bdev *bdev; 2927 struct spdk_bdev_desc *desc = NULL; 2928 struct spdk_io_channel *io_ch; 2929 struct spdk_bdev_opts bdev_opts = {}; 2930 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 2931 struct ut_expected_io *expected_io; 2932 uint64_t i; 2933 int rc; 2934 2935 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2936 bdev_opts.bdev_io_pool_size = 512; 2937 bdev_opts.bdev_io_cache_size = 64; 2938 2939 rc = spdk_bdev_set_opts(&bdev_opts); 2940 CU_ASSERT(rc == 0); 2941 spdk_bdev_initialize(bdev_init_cb, NULL); 2942 2943 bdev = allocate_bdev("bdev0"); 2944 2945 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2946 CU_ASSERT(rc == 0); 2947 SPDK_CU_ASSERT_FATAL(desc != NULL); 2948 io_ch = spdk_bdev_get_io_channel(desc); 2949 CU_ASSERT(io_ch != NULL); 2950 2951 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 2952 bdev->write_unit_size = 32; 2953 bdev->split_on_write_unit = true; 2954 g_io_done = false; 2955 2956 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 2957 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 2958 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2959 2960 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 2961 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 2962 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2963 2964 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 2965 CU_ASSERT(rc == 0); 2966 CU_ASSERT(g_io_done == false); 2967 2968 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2969 stub_complete_io(2); 2970 CU_ASSERT(g_io_done == true); 2971 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2972 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2973 2974 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 2975 * based on write_unit_size, not optimal_io_boundary */ 2976 bdev->split_on_optimal_io_boundary = true; 2977 bdev->optimal_io_boundary = 16; 2978 g_io_done = false; 2979 2980 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 2981 CU_ASSERT(rc == 0); 2982 CU_ASSERT(g_io_done == false); 2983 2984 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2985 stub_complete_io(2); 2986 CU_ASSERT(g_io_done == true); 2987 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2988 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2989 2990 /* Write I/O should fail if it is smaller than write_unit_size */ 2991 g_io_done = false; 2992 2993 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 2994 CU_ASSERT(rc == 0); 2995 CU_ASSERT(g_io_done == false); 2996 2997 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2998 poll_threads(); 2999 CU_ASSERT(g_io_done == true); 3000 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3001 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3002 3003 /* Same for I/O not aligned to write_unit_size */ 3004 g_io_done = false; 3005 3006 rc = 
spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3007 CU_ASSERT(rc == 0); 3008 CU_ASSERT(g_io_done == false); 3009 3010 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3011 poll_threads(); 3012 CU_ASSERT(g_io_done == true); 3013 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3014 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3015 3016 /* Write should fail if it needs to be split but there are not enough iovs to submit 3017 * an entire write unit */ 3018 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3019 g_io_done = false; 3020 3021 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3022 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3023 iov[i].iov_len = 512; 3024 } 3025 3026 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3027 io_done, NULL); 3028 CU_ASSERT(rc == 0); 3029 CU_ASSERT(g_io_done == false); 3030 3031 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3032 poll_threads(); 3033 CU_ASSERT(g_io_done == true); 3034 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3035 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3036 3037 spdk_put_io_channel(io_ch); 3038 spdk_bdev_close(desc); 3039 free_bdev(bdev); 3040 spdk_bdev_finish(bdev_fini_cb, NULL); 3041 poll_threads(); 3042 } 3043 3044 static void 3045 bdev_io_alignment(void) 3046 { 3047 struct spdk_bdev *bdev; 3048 struct spdk_bdev_desc *desc = NULL; 3049 struct spdk_io_channel *io_ch; 3050 struct spdk_bdev_opts bdev_opts = {}; 3051 int rc; 3052 void *buf = NULL; 3053 struct iovec iovs[2]; 3054 int iovcnt; 3055 uint64_t alignment; 3056 3057 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3058 bdev_opts.bdev_io_pool_size = 20; 3059 bdev_opts.bdev_io_cache_size = 2; 3060 3061 rc = spdk_bdev_set_opts(&bdev_opts); 3062 CU_ASSERT(rc == 0); 3063 spdk_bdev_initialize(bdev_init_cb, NULL); 3064 3065 fn_table.submit_request = stub_submit_request_get_buf; 3066 bdev = allocate_bdev("bdev0"); 3067 3068 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3069 CU_ASSERT(rc == 0); 3070 CU_ASSERT(desc != NULL); 3071 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3072 io_ch = spdk_bdev_get_io_channel(desc); 3073 CU_ASSERT(io_ch != NULL); 3074 3075 /* Create aligned buffer */ 3076 rc = posix_memalign(&buf, 4096, 8192); 3077 SPDK_CU_ASSERT_FATAL(rc == 0); 3078 3079 /* Pass aligned single buffer with no alignment required */ 3080 alignment = 1; 3081 bdev->required_alignment = spdk_u32log2(alignment); 3082 3083 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3084 CU_ASSERT(rc == 0); 3085 stub_complete_io(1); 3086 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3087 alignment)); 3088 3089 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3090 CU_ASSERT(rc == 0); 3091 stub_complete_io(1); 3092 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3093 alignment)); 3094 3095 /* Pass unaligned single buffer with no alignment required */ 3096 alignment = 1; 3097 bdev->required_alignment = spdk_u32log2(alignment); 3098 3099 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3100 CU_ASSERT(rc == 0); 3101 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3102 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3103 stub_complete_io(1); 3104 3105 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3106 CU_ASSERT(rc == 0); 3107 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3108 
CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3109 stub_complete_io(1); 3110 3111 /* Pass unaligned single buffer with 512 alignment required */ 3112 alignment = 512; 3113 bdev->required_alignment = spdk_u32log2(alignment); 3114 3115 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3116 CU_ASSERT(rc == 0); 3117 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3118 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3119 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3120 alignment)); 3121 stub_complete_io(1); 3122 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3123 3124 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3125 CU_ASSERT(rc == 0); 3126 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3127 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3128 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3129 alignment)); 3130 stub_complete_io(1); 3131 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3132 3133 /* Pass unaligned single buffer with 4096 alignment required */ 3134 alignment = 4096; 3135 bdev->required_alignment = spdk_u32log2(alignment); 3136 3137 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3138 CU_ASSERT(rc == 0); 3139 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3140 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3141 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3142 alignment)); 3143 stub_complete_io(1); 3144 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3145 3146 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3147 CU_ASSERT(rc == 0); 3148 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3149 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3150 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3151 alignment)); 3152 stub_complete_io(1); 3153 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3154 3155 /* Pass aligned iovs with no alignment required */ 3156 alignment = 1; 3157 bdev->required_alignment = spdk_u32log2(alignment); 3158 3159 iovcnt = 1; 3160 iovs[0].iov_base = buf; 3161 iovs[0].iov_len = 512; 3162 3163 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3164 CU_ASSERT(rc == 0); 3165 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3166 stub_complete_io(1); 3167 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3168 3169 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3170 CU_ASSERT(rc == 0); 3171 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3172 stub_complete_io(1); 3173 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3174 3175 /* Pass unaligned iovs with no alignment required */ 3176 alignment = 1; 3177 bdev->required_alignment = spdk_u32log2(alignment); 3178 3179 iovcnt = 2; 3180 iovs[0].iov_base = buf + 16; 3181 iovs[0].iov_len = 256; 3182 iovs[1].iov_base = buf + 16 + 256 + 32; 3183 iovs[1].iov_len = 256; 3184 3185 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3186 CU_ASSERT(rc == 0); 3187 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3188 stub_complete_io(1); 3189 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3190 3191 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3192 CU_ASSERT(rc == 0); 3193 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3194 stub_complete_io(1); 3195 
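/* As with the writev just above: no alignment is required, so no bounce
 * buffer is used and the module must see the caller's original iov base,
 * which the following assertion confirms. */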
CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3196 3197 /* Pass unaligned iov with 2048 alignment required */ 3198 alignment = 2048; 3199 bdev->required_alignment = spdk_u32log2(alignment); 3200 3201 iovcnt = 2; 3202 iovs[0].iov_base = buf + 16; 3203 iovs[0].iov_len = 256; 3204 iovs[1].iov_base = buf + 16 + 256 + 32; 3205 iovs[1].iov_len = 256; 3206 3207 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3208 CU_ASSERT(rc == 0); 3209 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3210 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3211 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3212 alignment)); 3213 stub_complete_io(1); 3214 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3215 3216 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3217 CU_ASSERT(rc == 0); 3218 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3219 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3220 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3221 alignment)); 3222 stub_complete_io(1); 3223 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3224 3225 /* Pass iov without an allocated buffer, with no alignment required */ 3226 alignment = 1; 3227 bdev->required_alignment = spdk_u32log2(alignment); 3228 3229 iovcnt = 1; 3230 iovs[0].iov_base = NULL; 3231 iovs[0].iov_len = 0; 3232 3233 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3234 CU_ASSERT(rc == 0); 3235 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3236 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3237 alignment)); 3238 stub_complete_io(1); 3239 3240 /* Pass iov without an allocated buffer, with 1024 alignment required */ 3241 alignment = 1024; 3242 bdev->required_alignment = spdk_u32log2(alignment); 3243 3244 iovcnt = 1; 3245 iovs[0].iov_base = NULL; 3246 iovs[0].iov_len = 0; 3247 3248 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3249 CU_ASSERT(rc == 0); 3250 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3251 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3252 alignment)); 3253 stub_complete_io(1); 3254 3255 spdk_put_io_channel(io_ch); 3256 spdk_bdev_close(desc); 3257 free_bdev(bdev); 3258 fn_table.submit_request = stub_submit_request; 3259 spdk_bdev_finish(bdev_fini_cb, NULL); 3260 poll_threads(); 3261 3262 free(buf); 3263 } 3264 3265 static void 3266 bdev_io_alignment_with_boundary(void) 3267 { 3268 struct spdk_bdev *bdev; 3269 struct spdk_bdev_desc *desc = NULL; 3270 struct spdk_io_channel *io_ch; 3271 struct spdk_bdev_opts bdev_opts = {}; 3272 int rc; 3273 void *buf = NULL; 3274 struct iovec iovs[2]; 3275 int iovcnt; 3276 uint64_t alignment; 3277 3278 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3279 bdev_opts.bdev_io_pool_size = 20; 3280 bdev_opts.bdev_io_cache_size = 2; 3281 3282 bdev_opts.opts_size = sizeof(bdev_opts); 3283 rc = spdk_bdev_set_opts(&bdev_opts); 3284 CU_ASSERT(rc == 0); 3285 spdk_bdev_initialize(bdev_init_cb, NULL); 3286 3287 fn_table.submit_request = stub_submit_request_get_buf; 3288 bdev = allocate_bdev("bdev0"); 3289 3290 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3291 CU_ASSERT(rc == 0); 3292 CU_ASSERT(desc != NULL); 3293 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3294 io_ch = spdk_bdev_get_io_channel(desc); 3295 CU_ASSERT(io_ch != NULL); 3296 3297 /* Create aligned buffer */ 3298 rc =
posix_memalign(&buf, 4096, 131072); 3299 SPDK_CU_ASSERT_FATAL(rc == 0); 3300 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3301 3302 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3303 alignment = 512; 3304 bdev->required_alignment = spdk_u32log2(alignment); 3305 bdev->optimal_io_boundary = 2; 3306 bdev->split_on_optimal_io_boundary = true; 3307 3308 iovcnt = 1; 3309 iovs[0].iov_base = NULL; 3310 iovs[0].iov_len = 512 * 3; 3311 3312 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3313 CU_ASSERT(rc == 0); 3314 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3315 stub_complete_io(2); 3316 3317 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3318 alignment = 512; 3319 bdev->required_alignment = spdk_u32log2(alignment); 3320 bdev->optimal_io_boundary = 16; 3321 bdev->split_on_optimal_io_boundary = true; 3322 3323 iovcnt = 1; 3324 iovs[0].iov_base = NULL; 3325 iovs[0].iov_len = 512 * 16; 3326 3327 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3328 CU_ASSERT(rc == 0); 3329 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3330 stub_complete_io(2); 3331 3332 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */ 3333 alignment = 512; 3334 bdev->required_alignment = spdk_u32log2(alignment); 3335 bdev->optimal_io_boundary = 128; 3336 bdev->split_on_optimal_io_boundary = true; 3337 3338 iovcnt = 1; 3339 iovs[0].iov_base = buf + 16; 3340 iovs[0].iov_len = 512 * 160; 3341 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3342 CU_ASSERT(rc == 0); 3343 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3344 stub_complete_io(2); 3345 3346 /* 512 * 3 with 2 IO boundary */ 3347 alignment = 512; 3348 bdev->required_alignment = spdk_u32log2(alignment); 3349 bdev->optimal_io_boundary = 2; 3350 bdev->split_on_optimal_io_boundary = true; 3351 3352 iovcnt = 2; 3353 iovs[0].iov_base = buf + 16; 3354 iovs[0].iov_len = 512; 3355 iovs[1].iov_base = buf + 16 + 512 + 32; 3356 iovs[1].iov_len = 1024; 3357 3358 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3359 CU_ASSERT(rc == 0); 3360 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3361 stub_complete_io(2); 3362 3363 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3364 CU_ASSERT(rc == 0); 3365 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3366 stub_complete_io(2); 3367 3368 /* 512 * 64 with 32 IO boundary */ 3369 bdev->optimal_io_boundary = 32; 3370 iovcnt = 2; 3371 iovs[0].iov_base = buf + 16; 3372 iovs[0].iov_len = 16384; 3373 iovs[1].iov_base = buf + 16 + 16384 + 32; 3374 iovs[1].iov_len = 16384; 3375 3376 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3377 CU_ASSERT(rc == 0); 3378 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3379 stub_complete_io(3); 3380 3381 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3382 CU_ASSERT(rc == 0); 3383 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3384 stub_complete_io(3); 3385 3386 /* 512 * 160 with 32 IO boundary */ 3387 iovcnt = 1; 3388 iovs[0].iov_base = buf + 16; 3389 iovs[0].iov_len = 16384 + 65536; 3390 3391 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3392 CU_ASSERT(rc == 0); 3393 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3394 stub_complete_io(6); 3395 3396 spdk_put_io_channel(io_ch); 3397 spdk_bdev_close(desc);
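/* Teardown: restore the default submit_request stub below so the
 * stub_submit_request_get_buf variant installed above does not leak into
 * later tests. */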
3398 free_bdev(bdev); 3399 fn_table.submit_request = stub_submit_request; 3400 spdk_bdev_finish(bdev_fini_cb, NULL); 3401 poll_threads(); 3402 3403 free(buf); 3404 } 3405 3406 static void 3407 histogram_status_cb(void *cb_arg, int status) 3408 { 3409 g_status = status; 3410 } 3411 3412 static void 3413 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3414 { 3415 g_status = status; 3416 g_histogram = histogram; 3417 } 3418 3419 static void 3420 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3421 uint64_t total, uint64_t so_far) 3422 { 3423 g_count += count; 3424 } 3425 3426 static void 3427 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3428 { 3429 spdk_histogram_data_fn cb_fn = cb_arg; 3430 3431 g_status = status; 3432 3433 if (status == 0) { 3434 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3435 } 3436 } 3437 3438 static void 3439 bdev_histograms(void) 3440 { 3441 struct spdk_bdev *bdev; 3442 struct spdk_bdev_desc *desc = NULL; 3443 struct spdk_io_channel *ch; 3444 struct spdk_histogram_data *histogram; 3445 uint8_t buf[4096]; 3446 int rc; 3447 3448 spdk_bdev_initialize(bdev_init_cb, NULL); 3449 3450 bdev = allocate_bdev("bdev"); 3451 3452 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3453 CU_ASSERT(rc == 0); 3454 CU_ASSERT(desc != NULL); 3455 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3456 3457 ch = spdk_bdev_get_io_channel(desc); 3458 CU_ASSERT(ch != NULL); 3459 3460 /* Enable histogram */ 3461 g_status = -1; 3462 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3463 poll_threads(); 3464 CU_ASSERT(g_status == 0); 3465 CU_ASSERT(bdev->internal.histogram_enabled == true); 3466 3467 /* Allocate histogram */ 3468 histogram = spdk_histogram_data_alloc(); 3469 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3470 3471 /* Check if histogram is zeroed */ 3472 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3473 poll_threads(); 3474 CU_ASSERT(g_status == 0); 3475 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3476 3477 g_count = 0; 3478 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3479 3480 CU_ASSERT(g_count == 0); 3481 3482 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3483 CU_ASSERT(rc == 0); 3484 3485 spdk_delay_us(10); 3486 stub_complete_io(1); 3487 poll_threads(); 3488 3489 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3490 CU_ASSERT(rc == 0); 3491 3492 spdk_delay_us(10); 3493 stub_complete_io(1); 3494 poll_threads(); 3495 3496 /* Check if histogram gathered data from all I/O channels */ 3497 g_histogram = NULL; 3498 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3499 poll_threads(); 3500 CU_ASSERT(g_status == 0); 3501 CU_ASSERT(bdev->internal.histogram_enabled == true); 3502 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3503 3504 g_count = 0; 3505 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3506 CU_ASSERT(g_count == 2); 3507 3508 g_count = 0; 3509 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3510 CU_ASSERT(g_status == 0); 3511 CU_ASSERT(g_count == 2); 3512 3513 /* Disable histogram */ 3514 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3515 poll_threads(); 3516 CU_ASSERT(g_status == 0); 3517 CU_ASSERT(bdev->internal.histogram_enabled == false); 3518 3519 /* Try to run histogram commands on disabled bdev */ 3520 spdk_bdev_histogram_get(bdev, histogram, 
histogram_data_cb, NULL); 3521 poll_threads(); 3522 CU_ASSERT(g_status == -EFAULT); 3523 3524 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3525 CU_ASSERT(g_status == -EFAULT); 3526 3527 spdk_histogram_data_free(histogram); 3528 spdk_put_io_channel(ch); 3529 spdk_bdev_close(desc); 3530 free_bdev(bdev); 3531 spdk_bdev_finish(bdev_fini_cb, NULL); 3532 poll_threads(); 3533 } 3534 3535 static void 3536 _bdev_compare(bool emulated) 3537 { 3538 struct spdk_bdev *bdev; 3539 struct spdk_bdev_desc *desc = NULL; 3540 struct spdk_io_channel *ioch; 3541 struct ut_expected_io *expected_io; 3542 uint64_t offset, num_blocks; 3543 uint32_t num_completed; 3544 char aa_buf[512]; 3545 char bb_buf[512]; 3546 struct iovec compare_iov; 3547 uint8_t expected_io_type; 3548 int rc; 3549 3550 if (emulated) { 3551 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3552 } else { 3553 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3554 } 3555 3556 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3557 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3558 3559 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3560 3561 spdk_bdev_initialize(bdev_init_cb, NULL); 3562 fn_table.submit_request = stub_submit_request_get_buf; 3563 bdev = allocate_bdev("bdev"); 3564 3565 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3566 CU_ASSERT_EQUAL(rc, 0); 3567 SPDK_CU_ASSERT_FATAL(desc != NULL); 3568 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3569 ioch = spdk_bdev_get_io_channel(desc); 3570 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3571 3572 fn_table.submit_request = stub_submit_request_get_buf; 3573 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3574 3575 offset = 50; 3576 num_blocks = 1; 3577 compare_iov.iov_base = aa_buf; 3578 compare_iov.iov_len = sizeof(aa_buf); 3579 3580 /* 1. successful compare */ 3581 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3582 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3583 3584 g_io_done = false; 3585 g_compare_read_buf = aa_buf; 3586 g_compare_read_buf_len = sizeof(aa_buf); 3587 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3588 CU_ASSERT_EQUAL(rc, 0); 3589 num_completed = stub_complete_io(1); 3590 CU_ASSERT_EQUAL(num_completed, 1); 3591 CU_ASSERT(g_io_done == true); 3592 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3593 3594 /* 2. 
miscompare */ 3595 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3596 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3597 3598 g_io_done = false; 3599 g_compare_read_buf = bb_buf; 3600 g_compare_read_buf_len = sizeof(bb_buf); 3601 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3602 CU_ASSERT_EQUAL(rc, 0); 3603 num_completed = stub_complete_io(1); 3604 CU_ASSERT_EQUAL(num_completed, 1); 3605 CU_ASSERT(g_io_done == true); 3606 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3607 3608 spdk_put_io_channel(ioch); 3609 spdk_bdev_close(desc); 3610 free_bdev(bdev); 3611 fn_table.submit_request = stub_submit_request; 3612 spdk_bdev_finish(bdev_fini_cb, NULL); 3613 poll_threads(); 3614 3615 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3616 3617 g_compare_read_buf = NULL; 3618 } 3619 3620 static void 3621 _bdev_compare_with_md(bool emulated) 3622 { 3623 struct spdk_bdev *bdev; 3624 struct spdk_bdev_desc *desc = NULL; 3625 struct spdk_io_channel *ioch; 3626 struct ut_expected_io *expected_io; 3627 uint64_t offset, num_blocks; 3628 uint32_t num_completed; 3629 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3630 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3631 char buf_miscompare[1024 /* 2 * blocklen */]; 3632 char md_buf[16]; 3633 char md_buf_miscompare[16]; 3634 struct iovec compare_iov; 3635 uint8_t expected_io_type; 3636 int rc; 3637 3638 if (emulated) { 3639 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3640 } else { 3641 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3642 } 3643 3644 memset(buf, 0xaa, sizeof(buf)); 3645 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3646 /* make last md different */ 3647 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3648 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3649 memset(md_buf, 0xaa, 16); 3650 memset(md_buf_miscompare, 0xbb, 16); 3651 3652 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3653 3654 spdk_bdev_initialize(bdev_init_cb, NULL); 3655 fn_table.submit_request = stub_submit_request_get_buf; 3656 bdev = allocate_bdev("bdev"); 3657 3658 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3659 CU_ASSERT_EQUAL(rc, 0); 3660 SPDK_CU_ASSERT_FATAL(desc != NULL); 3661 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3662 ioch = spdk_bdev_get_io_channel(desc); 3663 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3664 3665 fn_table.submit_request = stub_submit_request_get_buf; 3666 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3667 3668 offset = 50; 3669 num_blocks = 2; 3670 3671 /* interleaved md & data */ 3672 bdev->md_interleave = true; 3673 bdev->md_len = 8; 3674 bdev->blocklen = 512 + 8; 3675 compare_iov.iov_base = buf; 3676 compare_iov.iov_len = sizeof(buf); 3677 3678 /* 1. successful compare with md interleaved */ 3679 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3680 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3681 3682 g_io_done = false; 3683 g_compare_read_buf = buf; 3684 g_compare_read_buf_len = sizeof(buf); 3685 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3686 CU_ASSERT_EQUAL(rc, 0); 3687 num_completed = stub_complete_io(1); 3688 CU_ASSERT_EQUAL(num_completed, 1); 3689 CU_ASSERT(g_io_done == true); 3690 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3691 3692 /* 2. 
miscompare with md interleaved */ 3693 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3694 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3695 3696 g_io_done = false; 3697 g_compare_read_buf = buf_interleaved_miscompare; 3698 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3699 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3700 CU_ASSERT_EQUAL(rc, 0); 3701 num_completed = stub_complete_io(1); 3702 CU_ASSERT_EQUAL(num_completed, 1); 3703 CU_ASSERT(g_io_done == true); 3704 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3705 3706 /* Separate data & md buffers */ 3707 bdev->md_interleave = false; 3708 bdev->blocklen = 512; 3709 compare_iov.iov_base = buf; 3710 compare_iov.iov_len = 1024; 3711 3712 /* 3. successful compare with md separated */ 3713 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3714 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3715 3716 g_io_done = false; 3717 g_compare_read_buf = buf; 3718 g_compare_read_buf_len = 1024; 3719 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3720 g_compare_md_buf = md_buf; 3721 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3722 offset, num_blocks, io_done, NULL); 3723 CU_ASSERT_EQUAL(rc, 0); 3724 num_completed = stub_complete_io(1); 3725 CU_ASSERT_EQUAL(num_completed, 1); 3726 CU_ASSERT(g_io_done == true); 3727 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3728 3729 /* 4. miscompare with md separated where md buf is different */ 3730 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3731 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3732 3733 g_io_done = false; 3734 g_compare_read_buf = buf; 3735 g_compare_read_buf_len = 1024; 3736 g_compare_md_buf = md_buf_miscompare; 3737 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3738 offset, num_blocks, io_done, NULL); 3739 CU_ASSERT_EQUAL(rc, 0); 3740 num_completed = stub_complete_io(1); 3741 CU_ASSERT_EQUAL(num_completed, 1); 3742 CU_ASSERT(g_io_done == true); 3743 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3744 3745 /* 5. 
miscompare with md separated where buf is different */ 3746 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3747 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3748 3749 g_io_done = false; 3750 g_compare_read_buf = buf_miscompare; 3751 g_compare_read_buf_len = sizeof(buf_miscompare); 3752 g_compare_md_buf = md_buf; 3753 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3754 offset, num_blocks, io_done, NULL); 3755 CU_ASSERT_EQUAL(rc, 0); 3756 num_completed = stub_complete_io(1); 3757 CU_ASSERT_EQUAL(num_completed, 1); 3758 CU_ASSERT(g_io_done == true); 3759 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3760 3761 bdev->md_len = 0; 3762 g_compare_md_buf = NULL; 3763 3764 spdk_put_io_channel(ioch); 3765 spdk_bdev_close(desc); 3766 free_bdev(bdev); 3767 fn_table.submit_request = stub_submit_request; 3768 spdk_bdev_finish(bdev_fini_cb, NULL); 3769 poll_threads(); 3770 3771 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3772 3773 g_compare_read_buf = NULL; 3774 } 3775 3776 static void 3777 bdev_compare(void) 3778 { 3779 _bdev_compare(false); 3780 _bdev_compare_with_md(false); 3781 } 3782 3783 static void 3784 bdev_compare_emulated(void) 3785 { 3786 _bdev_compare(true); 3787 _bdev_compare_with_md(true); 3788 } 3789 3790 static void 3791 bdev_compare_and_write(void) 3792 { 3793 struct spdk_bdev *bdev; 3794 struct spdk_bdev_desc *desc = NULL; 3795 struct spdk_io_channel *ioch; 3796 struct ut_expected_io *expected_io; 3797 uint64_t offset, num_blocks; 3798 uint32_t num_completed; 3799 char aa_buf[512]; 3800 char bb_buf[512]; 3801 char cc_buf[512]; 3802 char write_buf[512]; 3803 struct iovec compare_iov; 3804 struct iovec write_iov; 3805 int rc; 3806 3807 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3808 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3809 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3810 3811 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3812 3813 spdk_bdev_initialize(bdev_init_cb, NULL); 3814 fn_table.submit_request = stub_submit_request_get_buf; 3815 bdev = allocate_bdev("bdev"); 3816 3817 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3818 CU_ASSERT_EQUAL(rc, 0); 3819 SPDK_CU_ASSERT_FATAL(desc != NULL); 3820 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3821 ioch = spdk_bdev_get_io_channel(desc); 3822 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3823 3824 fn_table.submit_request = stub_submit_request_get_buf; 3825 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3826 3827 offset = 50; 3828 num_blocks = 1; 3829 compare_iov.iov_base = aa_buf; 3830 compare_iov.iov_len = sizeof(aa_buf); 3831 write_iov.iov_base = bb_buf; 3832 write_iov.iov_len = sizeof(bb_buf); 3833 3834 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3835 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3836 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3837 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3838 3839 g_io_done = false; 3840 g_compare_read_buf = aa_buf; 3841 g_compare_read_buf_len = sizeof(aa_buf); 3842 memset(write_buf, 0, sizeof(write_buf)); 3843 g_compare_write_buf = write_buf; 3844 g_compare_write_buf_len = sizeof(write_buf); 3845 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3846 offset, num_blocks, io_done, NULL); 3847 /* Trigger range locking */ 3848 poll_threads(); 3849 CU_ASSERT_EQUAL(rc, 0); 3850 num_completed = stub_complete_io(1); 3851 
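/* Only the compare half (the READ) has completed at this point. The parent
 * compare-and-write I/O must still be outstanding: g_io_done stays false
 * until the WRITE below completes and the range is unlocked. */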
CU_ASSERT_EQUAL(num_completed, 1); 3852 CU_ASSERT(g_io_done == false); 3853 num_completed = stub_complete_io(1); 3854 /* Trigger range unlocking */ 3855 poll_threads(); 3856 CU_ASSERT_EQUAL(num_completed, 1); 3857 CU_ASSERT(g_io_done == true); 3858 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3859 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3860 3861 /* Test miscompare */ 3862 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3863 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3864 3865 g_io_done = false; 3866 g_compare_read_buf = cc_buf; 3867 g_compare_read_buf_len = sizeof(cc_buf); 3868 memset(write_buf, 0, sizeof(write_buf)); 3869 g_compare_write_buf = write_buf; 3870 g_compare_write_buf_len = sizeof(write_buf); 3871 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3872 offset, num_blocks, io_done, NULL); 3873 /* Trigger range locking */ 3874 poll_threads(); 3875 CU_ASSERT_EQUAL(rc, 0); 3876 num_completed = stub_complete_io(1); 3877 /* Trigger range unlocking earlier because we expect an error here */ 3878 poll_threads(); 3879 CU_ASSERT_EQUAL(num_completed, 1); 3880 CU_ASSERT(g_io_done == true); 3881 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3882 num_completed = stub_complete_io(1); 3883 CU_ASSERT_EQUAL(num_completed, 0); 3884 3885 spdk_put_io_channel(ioch); 3886 spdk_bdev_close(desc); 3887 free_bdev(bdev); 3888 fn_table.submit_request = stub_submit_request; 3889 spdk_bdev_finish(bdev_fini_cb, NULL); 3890 poll_threads(); 3891 3892 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3893 3894 g_compare_read_buf = NULL; 3895 g_compare_write_buf = NULL; 3896 } 3897 3898 static void 3899 bdev_write_zeroes(void) 3900 { 3901 struct spdk_bdev *bdev; 3902 struct spdk_bdev_desc *desc = NULL; 3903 struct spdk_io_channel *ioch; 3904 struct ut_expected_io *expected_io; 3905 uint64_t offset, num_io_blocks, num_blocks; 3906 uint32_t num_completed, num_requests; 3907 int rc; 3908 3909 spdk_bdev_initialize(bdev_init_cb, NULL); 3910 bdev = allocate_bdev("bdev"); 3911 3912 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3913 CU_ASSERT_EQUAL(rc, 0); 3914 SPDK_CU_ASSERT_FATAL(desc != NULL); 3915 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3916 ioch = spdk_bdev_get_io_channel(desc); 3917 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3918 3919 fn_table.submit_request = stub_submit_request; 3920 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3921 3922 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3923 bdev->md_len = 0; 3924 bdev->blocklen = 4096; 3925 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3926 3927 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3928 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3929 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3930 CU_ASSERT_EQUAL(rc, 0); 3931 num_completed = stub_complete_io(1); 3932 CU_ASSERT_EQUAL(num_completed, 1); 3933 3934 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3935 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3936 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3937 num_requests = 2; 3938 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3939 3940 for (offset = 0; offset < num_requests; ++offset) { 3941 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3942 offset * num_io_blocks,
num_io_blocks, 0); 3943 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3944 } 3945 3946 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3947 CU_ASSERT_EQUAL(rc, 0); 3948 num_completed = stub_complete_io(num_requests); 3949 CU_ASSERT_EQUAL(num_completed, num_requests); 3950 3951 /* Check that the splitting is correct if bdev has interleaved metadata */ 3952 bdev->md_interleave = true; 3953 bdev->md_len = 64; 3954 bdev->blocklen = 4096 + 64; 3955 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3956 3957 num_requests = offset = 0; 3958 while (offset < num_blocks) { 3959 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3960 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3961 offset, num_io_blocks, 0); 3962 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3963 offset += num_io_blocks; 3964 num_requests++; 3965 } 3966 3967 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3968 CU_ASSERT_EQUAL(rc, 0); 3969 num_completed = stub_complete_io(num_requests); 3970 CU_ASSERT_EQUAL(num_completed, num_requests); 3971 num_completed = stub_complete_io(num_requests); 3972 CU_ASSERT_EQUAL(num_completed, 0); 3973 3974 /* Check the same for separate metadata buffer */ 3975 bdev->md_interleave = false; 3976 bdev->md_len = 64; 3977 bdev->blocklen = 4096; 3978 3979 num_requests = offset = 0; 3980 while (offset < num_blocks) { 3981 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks - offset); 3982 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3983 offset, num_io_blocks, 0); 3984 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3985 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3986 offset += num_io_blocks; 3987 num_requests++; 3988 } 3989 3990 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3991 CU_ASSERT_EQUAL(rc, 0); 3992 num_completed = stub_complete_io(num_requests); 3993 CU_ASSERT_EQUAL(num_completed, num_requests); 3994 3995 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3996 spdk_put_io_channel(ioch); 3997 spdk_bdev_close(desc); 3998 free_bdev(bdev); 3999 spdk_bdev_finish(bdev_fini_cb, NULL); 4000 poll_threads(); 4001 } 4002 4003 static void 4004 bdev_zcopy_write(void) 4005 { 4006 struct spdk_bdev *bdev; 4007 struct spdk_bdev_desc *desc = NULL; 4008 struct spdk_io_channel *ioch; 4009 struct ut_expected_io *expected_io; 4010 uint64_t offset, num_blocks; 4011 uint32_t num_completed; 4012 char aa_buf[512]; 4013 struct iovec iov; 4014 int rc; 4015 const bool populate = false; 4016 const bool commit = true; 4017 4018 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4019 4020 spdk_bdev_initialize(bdev_init_cb, NULL); 4021 bdev = allocate_bdev("bdev"); 4022 4023 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4024 CU_ASSERT_EQUAL(rc, 0); 4025 SPDK_CU_ASSERT_FATAL(desc != NULL); 4026 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4027 ioch = spdk_bdev_get_io_channel(desc); 4028 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4029 4030 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4031 4032 offset = 50; 4033 num_blocks = 1; 4034 iov.iov_base = NULL; 4035 iov.iov_len = 0; 4036 4037 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 4038 g_zcopy_read_buf_len = (uint32_t) -1; 4039 /* Do a zcopy start for a write (populate=false) */ 4040 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4041
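/* Zcopy write is a two-phase protocol: zcopy_start (populate=false) asks the
 * module for a buffer to fill, and zcopy_end (commit=true) flushes it. The
 * stub models the module side by handing back g_zcopy_write_buf, which the
 * iov assertions below rely on. */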
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4042 g_io_done = false; 4043 g_zcopy_write_buf = aa_buf; 4044 g_zcopy_write_buf_len = sizeof(aa_buf); 4045 g_zcopy_bdev_io = NULL; 4046 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4047 CU_ASSERT_EQUAL(rc, 0); 4048 num_completed = stub_complete_io(1); 4049 CU_ASSERT_EQUAL(num_completed, 1); 4050 CU_ASSERT(g_io_done == true); 4051 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4052 /* Check that the iov has been set up */ 4053 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 4054 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 4055 /* Check that the bdev_io has been saved */ 4056 CU_ASSERT(g_zcopy_bdev_io != NULL); 4057 /* Now do the zcopy end for a write (commit=true) */ 4058 g_io_done = false; 4059 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4060 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4061 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4062 CU_ASSERT_EQUAL(rc, 0); 4063 num_completed = stub_complete_io(1); 4064 CU_ASSERT_EQUAL(num_completed, 1); 4065 CU_ASSERT(g_io_done == true); 4066 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4067 /* Check that the g_zcopy pointers were reset by io_done */ 4068 CU_ASSERT(g_zcopy_write_buf == NULL); 4069 CU_ASSERT(g_zcopy_write_buf_len == 0); 4070 /* Check that io_done has freed the g_zcopy_bdev_io */ 4071 CU_ASSERT(g_zcopy_bdev_io == NULL); 4072 4073 /* Check that the zcopy read buffer has not been touched, which 4074 * ensures that the correct buffers were used. 4075 */ 4076 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4077 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4078 4079 spdk_put_io_channel(ioch); 4080 spdk_bdev_close(desc); 4081 free_bdev(bdev); 4082 spdk_bdev_finish(bdev_fini_cb, NULL); 4083 poll_threads(); 4084 } 4085 4086 static void 4087 bdev_zcopy_read(void) 4088 { 4089 struct spdk_bdev *bdev; 4090 struct spdk_bdev_desc *desc = NULL; 4091 struct spdk_io_channel *ioch; 4092 struct ut_expected_io *expected_io; 4093 uint64_t offset, num_blocks; 4094 uint32_t num_completed; 4095 char aa_buf[512]; 4096 struct iovec iov; 4097 int rc; 4098 const bool populate = true; 4099 const bool commit = false; 4100 4101 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4102 4103 spdk_bdev_initialize(bdev_init_cb, NULL); 4104 bdev = allocate_bdev("bdev"); 4105 4106 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4107 CU_ASSERT_EQUAL(rc, 0); 4108 SPDK_CU_ASSERT_FATAL(desc != NULL); 4109 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4110 ioch = spdk_bdev_get_io_channel(desc); 4111 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4112 4113 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4114 4115 offset = 50; 4116 num_blocks = 1; 4117 iov.iov_base = NULL; 4118 iov.iov_len = 0; 4119 4120 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4121 g_zcopy_write_buf_len = (uint32_t) -1; 4122 4123 /* Do a zcopy start for a read (populate=true) */ 4124 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4125 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4126 g_io_done = false; 4127 g_zcopy_read_buf = aa_buf; 4128 g_zcopy_read_buf_len = sizeof(aa_buf); 4129 g_zcopy_bdev_io = NULL; 4130 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4131 CU_ASSERT_EQUAL(rc, 0); 4132 num_completed = stub_complete_io(1); 4133 CU_ASSERT_EQUAL(num_completed, 1); 4134
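/* For a zcopy read, populate=true makes the start phase behave like a read:
 * the module exposes a buffer that already holds the data (g_zcopy_read_buf
 * in the stub), which the iov checks below verify. */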
CU_ASSERT(g_io_done == true); 4135 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4136 /* Check that the iov has been set up */ 4137 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4138 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4139 /* Check that the bdev_io has been saved */ 4140 CU_ASSERT(g_zcopy_bdev_io != NULL); 4141 4142 /* Now do the zcopy end for a read (commit=false) */ 4143 g_io_done = false; 4144 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4145 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4146 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4147 CU_ASSERT_EQUAL(rc, 0); 4148 num_completed = stub_complete_io(1); 4149 CU_ASSERT_EQUAL(num_completed, 1); 4150 CU_ASSERT(g_io_done == true); 4151 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4152 /* Check that the g_zcopy pointers were reset by io_done */ 4153 CU_ASSERT(g_zcopy_read_buf == NULL); 4154 CU_ASSERT(g_zcopy_read_buf_len == 0); 4155 /* Check that io_done has freed the g_zcopy_bdev_io */ 4156 CU_ASSERT(g_zcopy_bdev_io == NULL); 4157 4158 /* Check that the zcopy write buffer has not been touched, which 4159 * ensures that the correct buffers were used. 4160 */ 4161 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4162 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4163 4164 spdk_put_io_channel(ioch); 4165 spdk_bdev_close(desc); 4166 free_bdev(bdev); 4167 spdk_bdev_finish(bdev_fini_cb, NULL); 4168 poll_threads(); 4169 } 4170 4171 static void 4172 bdev_open_while_hotremove(void) 4173 { 4174 struct spdk_bdev *bdev; 4175 struct spdk_bdev_desc *desc[2] = {}; 4176 int rc; 4177 4178 bdev = allocate_bdev("bdev"); 4179 4180 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4181 CU_ASSERT(rc == 0); 4182 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4183 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4184 4185 spdk_bdev_unregister(bdev, NULL, NULL); 4186 /* Bdev unregister is handled asynchronously. Poll thread to complete.
*/ 4187 poll_threads(); 4188 4189 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4190 CU_ASSERT(rc == -ENODEV); 4191 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4192 4193 spdk_bdev_close(desc[0]); 4194 free_bdev(bdev); 4195 } 4196 4197 static void 4198 bdev_close_while_hotremove(void) 4199 { 4200 struct spdk_bdev *bdev; 4201 struct spdk_bdev_desc *desc = NULL; 4202 int rc = 0; 4203 4204 bdev = allocate_bdev("bdev"); 4205 4206 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4207 CU_ASSERT_EQUAL(rc, 0); 4208 SPDK_CU_ASSERT_FATAL(desc != NULL); 4209 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4210 4211 /* Simulate hot-unplug by unregistering bdev */ 4212 g_event_type1 = 0xFF; 4213 g_unregister_arg = NULL; 4214 g_unregister_rc = -1; 4215 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4216 /* Close device while remove event is in flight */ 4217 spdk_bdev_close(desc); 4218 4219 /* Ensure that unregister callback is delayed */ 4220 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4221 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4222 4223 poll_threads(); 4224 4225 /* Event callback shall not be issued because device was closed */ 4226 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4227 /* Unregister callback is issued */ 4228 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4229 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4230 4231 free_bdev(bdev); 4232 } 4233 4234 static void 4235 bdev_open_ext(void) 4236 { 4237 struct spdk_bdev *bdev; 4238 struct spdk_bdev_desc *desc1 = NULL; 4239 struct spdk_bdev_desc *desc2 = NULL; 4240 int rc = 0; 4241 4242 bdev = allocate_bdev("bdev"); 4243 4244 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4245 CU_ASSERT_EQUAL(rc, -EINVAL); 4246 4247 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4248 CU_ASSERT_EQUAL(rc, 0); 4249 4250 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4251 CU_ASSERT_EQUAL(rc, 0); 4252 4253 g_event_type1 = 0xFF; 4254 g_event_type2 = 0xFF; 4255 4256 /* Simulate hot-unplug by unregistering bdev */ 4257 spdk_bdev_unregister(bdev, NULL, NULL); 4258 poll_threads(); 4259 4260 /* Check if correct events have been triggered in event callback fn */ 4261 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4262 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4263 4264 free_bdev(bdev); 4265 poll_threads(); 4266 } 4267 4268 static void 4269 bdev_open_ext_unregister(void) 4270 { 4271 struct spdk_bdev *bdev; 4272 struct spdk_bdev_desc *desc1 = NULL; 4273 struct spdk_bdev_desc *desc2 = NULL; 4274 struct spdk_bdev_desc *desc3 = NULL; 4275 struct spdk_bdev_desc *desc4 = NULL; 4276 int rc = 0; 4277 4278 bdev = allocate_bdev("bdev"); 4279 4280 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4281 CU_ASSERT_EQUAL(rc, -EINVAL); 4282 4283 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4284 CU_ASSERT_EQUAL(rc, 0); 4285 4286 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4287 CU_ASSERT_EQUAL(rc, 0); 4288 4289 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4290 CU_ASSERT_EQUAL(rc, 0); 4291 4292 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4293 CU_ASSERT_EQUAL(rc, 0); 4294 4295 g_event_type1 = 0xFF; 4296 g_event_type2 = 0xFF; 4297 g_event_type3 = 0xFF; 4298 g_event_type4 = 0xFF; 4299 4300 g_unregister_arg = NULL; 4301 g_unregister_rc = -1; 4302 4303 /* Simulate hot-unplug by unregistering bdev */ 4304 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void 
*)0x12345678); 4305 4306 /* 4307 * Unregister is handled asynchronously and the event callbacks 4308 * (i.e., the bdev_open_cbN above) will be called. 4309 * bdev_open_cb3 and bdev_open_cb4 intentionally do not close 4310 * desc3 and desc4, so the bdev is not closed. 4311 */ 4312 poll_threads(); 4313 4314 /* Check if correct events have been triggered in event callback fn */ 4315 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4316 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4317 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4318 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4319 4320 /* Check that unregister callback is delayed */ 4321 CU_ASSERT(g_unregister_arg == NULL); 4322 CU_ASSERT(g_unregister_rc == -1); 4323 4324 /* 4325 * Explicitly close desc3. As desc4 is still open, the 4326 * unregister callback is still delayed. 4327 */ 4328 spdk_bdev_close(desc3); 4329 CU_ASSERT(g_unregister_arg == NULL); 4330 CU_ASSERT(g_unregister_rc == -1); 4331 4332 /* 4333 * Explicitly close desc4 to trigger the ongoing bdev unregister 4334 * operation after last desc is closed. 4335 */ 4336 spdk_bdev_close(desc4); 4337 4338 /* Poll the thread for the async unregister operation */ 4339 poll_threads(); 4340 4341 /* Check that unregister callback is executed */ 4342 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4343 CU_ASSERT(g_unregister_rc == 0); 4344 4345 free_bdev(bdev); 4346 poll_threads(); 4347 } 4348 4349 struct timeout_io_cb_arg { 4350 struct iovec iov; 4351 uint8_t type; 4352 }; 4353 4354 static int 4355 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4356 { 4357 struct spdk_bdev_io *bdev_io; 4358 int n = 0; 4359 4360 if (!ch) { 4361 return -1; 4362 } 4363 4364 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4365 n++; 4366 } 4367 4368 return n; 4369 } 4370 4371 static void 4372 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4373 { 4374 struct timeout_io_cb_arg *ctx = cb_arg; 4375 4376 ctx->type = bdev_io->type; 4377 ctx->iov.iov_base = bdev_io->iov.iov_base; 4378 ctx->iov.iov_len = bdev_io->iov.iov_len; 4379 } 4380 4381 static void 4382 bdev_set_io_timeout(void) 4383 { 4384 struct spdk_bdev *bdev; 4385 struct spdk_bdev_desc *desc = NULL; 4386 struct spdk_io_channel *io_ch = NULL; 4387 struct spdk_bdev_channel *bdev_ch = NULL; 4388 struct timeout_io_cb_arg cb_arg; 4389 4390 spdk_bdev_initialize(bdev_init_cb, NULL); 4391 4392 bdev = allocate_bdev("bdev"); 4393 4394 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4395 SPDK_CU_ASSERT_FATAL(desc != NULL); 4396 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4397 4398 io_ch = spdk_bdev_get_io_channel(desc); 4399 CU_ASSERT(io_ch != NULL); 4400 4401 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4402 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4403 4404 /* This is part 1: check the bdev_ch->io_submitted list to make sure it links the I/Os, and only the user-submitted I/Os. */
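/* bdev_channel_count_submitted_io() above simply walks bdev_ch->io_submitted,
 * so these counts include every I/O linked on the channel: parents, split
 * children and resets alike, as the later steps show. */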
4408 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4409 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4410 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4411 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4412 stub_complete_io(1); 4413 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4414 stub_complete_io(1); 4415 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4416 4417 /* Split IO */ 4418 bdev->optimal_io_boundary = 16; 4419 bdev->split_on_optimal_io_boundary = true; 4420 4421 /* Now test that a single-vector command is split correctly. 4422 * Offset 14, length 8, payload 0xF000 4423 * Child - Offset 14, length 2, payload 0xF000 4424 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4425 * 4426 * No expected I/Os are registered here; we only count what gets submitted. 4427 */ 4428 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4429 /* We count all submitted I/Os, including those generated by splitting. */ 4430 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4431 stub_complete_io(1); 4432 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4433 stub_complete_io(1); 4434 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4435 4436 /* Also include the reset IO */ 4437 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4438 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4439 poll_threads(); 4440 stub_complete_io(1); 4441 poll_threads(); 4442 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4443 4444 /* This is part 2: 4445 * Test the desc timeout poller registration. 4446 */ 4447 4448 /* Successfully set the timeout */ 4449 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4450 CU_ASSERT(desc->io_timeout_poller != NULL); 4451 CU_ASSERT(desc->timeout_in_sec == 30); 4452 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4453 CU_ASSERT(desc->cb_arg == &cb_arg); 4454 4455 /* Change the timeout limit */ 4456 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4457 CU_ASSERT(desc->io_timeout_poller != NULL); 4458 CU_ASSERT(desc->timeout_in_sec == 20); 4459 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4460 CU_ASSERT(desc->cb_arg == &cb_arg); 4461 4462 /* Disable the timeout */ 4463 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4464 CU_ASSERT(desc->io_timeout_poller == NULL); 4465 4466 /* This is part 3: 4467 * Catch a timed-out I/O and check that it is 4468 * the submitted one. */
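/* Each spdk_delay_us(15 * spdk_get_ticks_hz()) below is meant to advance the
 * mocked clock by 15 seconds (assuming the UT framework's tick mock), so one
 * delay stays under the 30-second limit and two of them cross it. */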
4470 memset(&cb_arg, 0, sizeof(cb_arg)); 4471 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4472 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4473 4474 /* Don't reach the limit */ 4475 spdk_delay_us(15 * spdk_get_ticks_hz()); 4476 poll_threads(); 4477 CU_ASSERT(cb_arg.type == 0); 4478 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4479 CU_ASSERT(cb_arg.iov.iov_len == 0); 4480 4481 /* 15 + 15 = 30 reaches the limit */ 4482 spdk_delay_us(15 * spdk_get_ticks_hz()); 4483 poll_threads(); 4484 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4485 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4486 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4487 stub_complete_io(1); 4488 4489 /* Use the same split I/O as above and check it */ 4490 memset(&cb_arg, 0, sizeof(cb_arg)); 4491 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4492 4493 /* The first child completes in time */ 4494 spdk_delay_us(15 * spdk_get_ticks_hz()); 4495 poll_threads(); 4496 stub_complete_io(1); 4497 CU_ASSERT(cb_arg.type == 0); 4498 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4499 CU_ASSERT(cb_arg.iov.iov_len == 0); 4500 4501 /* The second child reaches the limit */ 4502 spdk_delay_us(15 * spdk_get_ticks_hz()); 4503 poll_threads(); 4504 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4505 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4506 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4507 stub_complete_io(1); 4508 4509 /* Also include the reset IO */ 4510 memset(&cb_arg, 0, sizeof(cb_arg)); 4511 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4512 spdk_delay_us(30 * spdk_get_ticks_hz()); 4513 poll_threads(); 4514 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4515 stub_complete_io(1); 4516 poll_threads(); 4517 4518 spdk_put_io_channel(io_ch); 4519 spdk_bdev_close(desc); 4520 free_bdev(bdev); 4521 spdk_bdev_finish(bdev_fini_cb, NULL); 4522 poll_threads(); 4523 } 4524 4525 static void 4526 bdev_set_qd_sampling(void) 4527 { 4528 struct spdk_bdev *bdev; 4529 struct spdk_bdev_desc *desc = NULL; 4530 struct spdk_io_channel *io_ch = NULL; 4531 struct spdk_bdev_channel *bdev_ch = NULL; 4532 struct timeout_io_cb_arg cb_arg; 4533 4534 spdk_bdev_initialize(bdev_init_cb, NULL); 4535 4536 bdev = allocate_bdev("bdev"); 4537 4538 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4539 SPDK_CU_ASSERT_FATAL(desc != NULL); 4540 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4541 4542 io_ch = spdk_bdev_get_io_channel(desc); 4543 CU_ASSERT(io_ch != NULL); 4544 4545 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4546 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4547 4548 /* This is part 1: 4549 * Check the bdev_ch->io_submitted list 4550 * to make sure it links the I/Os, and only the user-submitted I/Os. 4551 */ 4552 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4553 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4554 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4555 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4556 stub_complete_io(1); 4557 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4558 stub_complete_io(1); 4559 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4560 4561 /* This is part 2: test the bdev's qd sampling poller registration. */
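/* When sampling is first enabled the period takes effect immediately; a later
 * change is staged in internal.new_period and promoted to internal.period only
 * after the qd poller runs, which is what the 1st-4th steps below verify. */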
4564 /* 1st Successfully set the qd sampling period */ 4565 spdk_bdev_set_qd_sampling_period(bdev, 10); 4566 CU_ASSERT(bdev->internal.new_period == 10); 4567 CU_ASSERT(bdev->internal.period == 10); 4568 CU_ASSERT(bdev->internal.qd_desc != NULL); 4569 poll_threads(); 4570 CU_ASSERT(bdev->internal.qd_poller != NULL); 4571 4572 /* 2nd Change the qd sampling period */ 4573 spdk_bdev_set_qd_sampling_period(bdev, 20); 4574 CU_ASSERT(bdev->internal.new_period == 20); 4575 CU_ASSERT(bdev->internal.period == 10); 4576 CU_ASSERT(bdev->internal.qd_desc != NULL); 4577 poll_threads(); 4578 CU_ASSERT(bdev->internal.qd_poller != NULL); 4579 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4580 4581 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4582 spdk_delay_us(20); 4583 poll_thread_times(0, 1); 4584 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4585 spdk_bdev_set_qd_sampling_period(bdev, 30); 4586 CU_ASSERT(bdev->internal.new_period == 30); 4587 CU_ASSERT(bdev->internal.period == 20); 4588 poll_threads(); 4589 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4590 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4591 4592 /* 4th Disable the qd sampling period */ 4593 spdk_bdev_set_qd_sampling_period(bdev, 0); 4594 CU_ASSERT(bdev->internal.new_period == 0); 4595 CU_ASSERT(bdev->internal.period == 30); 4596 poll_threads(); 4597 CU_ASSERT(bdev->internal.qd_poller == NULL); 4598 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4599 CU_ASSERT(bdev->internal.qd_desc == NULL); 4600 4601 /* This is part 3: 4602 * Test that the submitted I/O and reset work 4603 * properly with qd sampling. 4604 */ 4605 memset(&cb_arg, 0, sizeof(cb_arg)); 4606 spdk_bdev_set_qd_sampling_period(bdev, 1); 4607 poll_threads(); 4608 4609 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4610 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4611 4612 /* Also include the reset IO */ 4613 memset(&cb_arg, 0, sizeof(cb_arg)); 4614 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4615 poll_threads(); 4616 4617 /* Close the desc */ 4618 spdk_put_io_channel(io_ch); 4619 spdk_bdev_close(desc); 4620 4621 /* Complete the submitted IO and reset */ 4622 stub_complete_io(2); 4623 poll_threads(); 4624 4625 free_bdev(bdev); 4626 spdk_bdev_finish(bdev_fini_cb, NULL); 4627 poll_threads(); 4628 } 4629 4630 static void 4631 lba_range_overlap(void) 4632 { 4633 struct lba_range r1, r2; 4634 4635 r1.offset = 100; 4636 r1.length = 50; 4637 4638 r2.offset = 0; 4639 r2.length = 1; 4640 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4641 4642 r2.offset = 0; 4643 r2.length = 100; 4644 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4645 4646 r2.offset = 0; 4647 r2.length = 110; 4648 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4649 4650 r2.offset = 100; 4651 r2.length = 10; 4652 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4653 4654 r2.offset = 110; 4655 r2.length = 20; 4656 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4657 4658 r2.offset = 140; 4659 r2.length = 150; 4660 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4661 4662 r2.offset = 130; 4663 r2.length = 200; 4664 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4665 4666 r2.offset = 150; 4667 r2.length = 100; 4668 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4669 4670 r2.offset = 110; 4671 r2.length = 0; 4672 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4673 } 4674 4675 static bool
g_lock_lba_range_done; 4676 static bool g_unlock_lba_range_done; 4677 4678 static void 4679 lock_lba_range_done(void *ctx, int status) 4680 { 4681 g_lock_lba_range_done = true; 4682 } 4683 4684 static void 4685 unlock_lba_range_done(void *ctx, int status) 4686 { 4687 g_unlock_lba_range_done = true; 4688 } 4689 4690 static void 4691 lock_lba_range_check_ranges(void) 4692 { 4693 struct spdk_bdev *bdev; 4694 struct spdk_bdev_desc *desc = NULL; 4695 struct spdk_io_channel *io_ch; 4696 struct spdk_bdev_channel *channel; 4697 struct lba_range *range; 4698 int ctx1; 4699 int rc; 4700 4701 spdk_bdev_initialize(bdev_init_cb, NULL); 4702 4703 bdev = allocate_bdev("bdev0"); 4704 4705 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4706 CU_ASSERT(rc == 0); 4707 CU_ASSERT(desc != NULL); 4708 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4709 io_ch = spdk_bdev_get_io_channel(desc); 4710 CU_ASSERT(io_ch != NULL); 4711 channel = spdk_io_channel_get_ctx(io_ch); 4712 4713 g_lock_lba_range_done = false; 4714 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4715 CU_ASSERT(rc == 0); 4716 poll_threads(); 4717 4718 CU_ASSERT(g_lock_lba_range_done == true); 4719 range = TAILQ_FIRST(&channel->locked_ranges); 4720 SPDK_CU_ASSERT_FATAL(range != NULL); 4721 CU_ASSERT(range->offset == 20); 4722 CU_ASSERT(range->length == 10); 4723 CU_ASSERT(range->owner_ch == channel); 4724 4725 /* Unlocks must exactly match a lock. */ 4726 g_unlock_lba_range_done = false; 4727 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4728 CU_ASSERT(rc == -EINVAL); 4729 CU_ASSERT(g_unlock_lba_range_done == false); 4730 4731 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4732 CU_ASSERT(rc == 0); 4733 spdk_delay_us(100); 4734 poll_threads(); 4735 4736 CU_ASSERT(g_unlock_lba_range_done == true); 4737 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4738 4739 spdk_put_io_channel(io_ch); 4740 spdk_bdev_close(desc); 4741 free_bdev(bdev); 4742 spdk_bdev_finish(bdev_fini_cb, NULL); 4743 poll_threads(); 4744 } 4745 4746 static void 4747 lock_lba_range_with_io_outstanding(void) 4748 { 4749 struct spdk_bdev *bdev; 4750 struct spdk_bdev_desc *desc = NULL; 4751 struct spdk_io_channel *io_ch; 4752 struct spdk_bdev_channel *channel; 4753 struct lba_range *range; 4754 char buf[4096]; 4755 int ctx1; 4756 int rc; 4757 4758 spdk_bdev_initialize(bdev_init_cb, NULL); 4759 4760 bdev = allocate_bdev("bdev0"); 4761 4762 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4763 CU_ASSERT(rc == 0); 4764 CU_ASSERT(desc != NULL); 4765 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4766 io_ch = spdk_bdev_get_io_channel(desc); 4767 CU_ASSERT(io_ch != NULL); 4768 channel = spdk_io_channel_get_ctx(io_ch); 4769 4770 g_io_done = false; 4771 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4772 CU_ASSERT(rc == 0); 4773 4774 g_lock_lba_range_done = false; 4775 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4776 CU_ASSERT(rc == 0); 4777 poll_threads(); 4778 4779 /* The lock should immediately become valid, since there are no outstanding 4780 * write I/O. 
4781 */ 4782 CU_ASSERT(g_io_done == false); 4783 CU_ASSERT(g_lock_lba_range_done == true); 4784 range = TAILQ_FIRST(&channel->locked_ranges); 4785 SPDK_CU_ASSERT_FATAL(range != NULL); 4786 CU_ASSERT(range->offset == 20); 4787 CU_ASSERT(range->length == 10); 4788 CU_ASSERT(range->owner_ch == channel); 4789 CU_ASSERT(range->locked_ctx == &ctx1); 4790 4791 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4792 CU_ASSERT(rc == 0); 4793 stub_complete_io(1); 4794 spdk_delay_us(100); 4795 poll_threads(); 4796 4797 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4798 4799 /* Now try again, but with a write I/O. */ 4800 g_io_done = false; 4801 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4802 CU_ASSERT(rc == 0); 4803 4804 g_lock_lba_range_done = false; 4805 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4806 CU_ASSERT(rc == 0); 4807 poll_threads(); 4808 4809 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4810 * But note that the range should be on the channel's locked_list, to make sure no 4811 * new write I/O are started. 4812 */ 4813 CU_ASSERT(g_io_done == false); 4814 CU_ASSERT(g_lock_lba_range_done == false); 4815 range = TAILQ_FIRST(&channel->locked_ranges); 4816 SPDK_CU_ASSERT_FATAL(range != NULL); 4817 CU_ASSERT(range->offset == 20); 4818 CU_ASSERT(range->length == 10); 4819 4820 /* Complete the write I/O. This should make the lock valid (checked by confirming 4821 * our callback was invoked). 4822 */ 4823 stub_complete_io(1); 4824 spdk_delay_us(100); 4825 poll_threads(); 4826 CU_ASSERT(g_io_done == true); 4827 CU_ASSERT(g_lock_lba_range_done == true); 4828 4829 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4830 CU_ASSERT(rc == 0); 4831 poll_threads(); 4832 4833 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4834 4835 spdk_put_io_channel(io_ch); 4836 spdk_bdev_close(desc); 4837 free_bdev(bdev); 4838 spdk_bdev_finish(bdev_fini_cb, NULL); 4839 poll_threads(); 4840 } 4841 4842 static void 4843 lock_lba_range_overlapped(void) 4844 { 4845 struct spdk_bdev *bdev; 4846 struct spdk_bdev_desc *desc = NULL; 4847 struct spdk_io_channel *io_ch; 4848 struct spdk_bdev_channel *channel; 4849 struct lba_range *range; 4850 int ctx1; 4851 int rc; 4852 4853 spdk_bdev_initialize(bdev_init_cb, NULL); 4854 4855 bdev = allocate_bdev("bdev0"); 4856 4857 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4858 CU_ASSERT(rc == 0); 4859 CU_ASSERT(desc != NULL); 4860 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4861 io_ch = spdk_bdev_get_io_channel(desc); 4862 CU_ASSERT(io_ch != NULL); 4863 channel = spdk_io_channel_get_ctx(io_ch); 4864 4865 /* Lock range 20-29. */ 4866 g_lock_lba_range_done = false; 4867 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4868 CU_ASSERT(rc == 0); 4869 poll_threads(); 4870 4871 CU_ASSERT(g_lock_lba_range_done == true); 4872 range = TAILQ_FIRST(&channel->locked_ranges); 4873 SPDK_CU_ASSERT_FATAL(range != NULL); 4874 CU_ASSERT(range->offset == 20); 4875 CU_ASSERT(range->length == 10); 4876 4877 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4878 * 20-29. 
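 * (Illustrative restatement of the overlap rule exercised by lba_range_overlap()
 * above: two ranges overlap iff each starts at or before the last block of the
 * other, and a zero-length range never overlaps anything. Here [25, 39] shares
 * blocks 25-29 with [20, 29], so the new lock must wait on the pending list.)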
4879 */ 4880 g_lock_lba_range_done = false; 4881 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4882 CU_ASSERT(rc == 0); 4883 poll_threads(); 4884 4885 CU_ASSERT(g_lock_lba_range_done == false); 4886 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4887 SPDK_CU_ASSERT_FATAL(range != NULL); 4888 CU_ASSERT(range->offset == 25); 4889 CU_ASSERT(range->length == 15); 4890 4891 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4892 * no longer overlaps with an active lock. 4893 */ 4894 g_unlock_lba_range_done = false; 4895 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4896 CU_ASSERT(rc == 0); 4897 poll_threads(); 4898 4899 CU_ASSERT(g_unlock_lba_range_done == true); 4900 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4901 range = TAILQ_FIRST(&channel->locked_ranges); 4902 SPDK_CU_ASSERT_FATAL(range != NULL); 4903 CU_ASSERT(range->offset == 25); 4904 CU_ASSERT(range->length == 15); 4905 4906 /* Lock 40-59. This should immediately lock since it does not overlap with the 4907 * currently active 25-39 lock. 4908 */ 4909 g_lock_lba_range_done = false; 4910 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4911 CU_ASSERT(rc == 0); 4912 poll_threads(); 4913 4914 CU_ASSERT(g_lock_lba_range_done == true); 4915 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4916 SPDK_CU_ASSERT_FATAL(range != NULL); 4917 range = TAILQ_NEXT(range, tailq); 4918 SPDK_CU_ASSERT_FATAL(range != NULL); 4919 CU_ASSERT(range->offset == 40); 4920 CU_ASSERT(range->length == 20); 4921 4922 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4923 g_lock_lba_range_done = false; 4924 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4925 CU_ASSERT(rc == 0); 4926 poll_threads(); 4927 4928 CU_ASSERT(g_lock_lba_range_done == false); 4929 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4930 SPDK_CU_ASSERT_FATAL(range != NULL); 4931 CU_ASSERT(range->offset == 35); 4932 CU_ASSERT(range->length == 10); 4933 4934 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4935 * the 40-59 lock is still active. 4936 */ 4937 g_unlock_lba_range_done = false; 4938 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4939 CU_ASSERT(rc == 0); 4940 poll_threads(); 4941 4942 CU_ASSERT(g_unlock_lba_range_done == true); 4943 CU_ASSERT(g_lock_lba_range_done == false); 4944 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4945 SPDK_CU_ASSERT_FATAL(range != NULL); 4946 CU_ASSERT(range->offset == 35); 4947 CU_ASSERT(range->length == 10); 4948 4949 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4950 * no longer any active overlapping locks. 4951 */ 4952 g_unlock_lba_range_done = false; 4953 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4954 CU_ASSERT(rc == 0); 4955 poll_threads(); 4956 4957 CU_ASSERT(g_unlock_lba_range_done == true); 4958 CU_ASSERT(g_lock_lba_range_done == true); 4959 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4960 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4961 SPDK_CU_ASSERT_FATAL(range != NULL); 4962 CU_ASSERT(range->offset == 35); 4963 CU_ASSERT(range->length == 10); 4964 4965 /* Finally, unlock 35-44. 
*/ 4966 g_unlock_lba_range_done = false; 4967 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4968 CU_ASSERT(rc == 0); 4969 poll_threads(); 4970 4971 CU_ASSERT(g_unlock_lba_range_done == true); 4972 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4973 4974 spdk_put_io_channel(io_ch); 4975 spdk_bdev_close(desc); 4976 free_bdev(bdev); 4977 spdk_bdev_finish(bdev_fini_cb, NULL); 4978 poll_threads(); 4979 } 4980 4981 static void 4982 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4983 { 4984 g_abort_done = true; 4985 g_abort_status = bdev_io->internal.status; 4986 spdk_bdev_free_io(bdev_io); 4987 } 4988 4989 static void 4990 bdev_io_abort(void) 4991 { 4992 struct spdk_bdev *bdev; 4993 struct spdk_bdev_desc *desc = NULL; 4994 struct spdk_io_channel *io_ch; 4995 struct spdk_bdev_channel *channel; 4996 struct spdk_bdev_mgmt_channel *mgmt_ch; 4997 struct spdk_bdev_opts bdev_opts = {}; 4998 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 4999 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5000 int rc; 5001 5002 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5003 bdev_opts.bdev_io_pool_size = 7; 5004 bdev_opts.bdev_io_cache_size = 2; 5005 5006 rc = spdk_bdev_set_opts(&bdev_opts); 5007 CU_ASSERT(rc == 0); 5008 spdk_bdev_initialize(bdev_init_cb, NULL); 5009 5010 bdev = allocate_bdev("bdev0"); 5011 5012 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5013 CU_ASSERT(rc == 0); 5014 CU_ASSERT(desc != NULL); 5015 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5016 io_ch = spdk_bdev_get_io_channel(desc); 5017 CU_ASSERT(io_ch != NULL); 5018 channel = spdk_io_channel_get_ctx(io_ch); 5019 mgmt_ch = channel->shared_resource->mgmt_ch; 5020 5021 g_abort_done = false; 5022 5023 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5024 5025 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5026 CU_ASSERT(rc == -ENOTSUP); 5027 5028 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5029 5030 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 5031 CU_ASSERT(rc == 0); 5032 CU_ASSERT(g_abort_done == true); 5033 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 5034 5035 /* Test the case that the target I/O was successfully aborted. */ 5036 g_io_done = false; 5037 5038 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5039 CU_ASSERT(rc == 0); 5040 CU_ASSERT(g_io_done == false); 5041 5042 g_abort_done = false; 5043 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5044 5045 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5046 CU_ASSERT(rc == 0); 5047 CU_ASSERT(g_io_done == true); 5048 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5049 stub_complete_io(1); 5050 CU_ASSERT(g_abort_done == true); 5051 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5052 5053 /* Test the case that the target I/O was not aborted because it completed 5054 * in the middle of execution of the abort. 
 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split by stripe, and
	 * then split again, is aborted correctly. Abort is requested before the
	 * second child I/O is submitted. The parent I/O should complete with
	 * failure without submitting the second child I/O.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->optimal_io_boundary = 16;
	g_io_done = false;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Unlike the cases above, the child abort requests are submitted
	 * sequentially because only a limited number of spdk_bdev_io structures
	 * are available.
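 * (Illustrative arithmetic, assuming the options set at the top of this test:
 * bdev_io_pool_size is 7, and the 50-block read at offset 14 below splits on
 * the 16-block boundary into 4 children covering blocks 14-15, 16-31, 32-47
 * and 48-63. The parent read plus its 4 children consume 5 spdk_bdev_io
 * structures, so the abort cannot allocate a child abort for every child I/O
 * at once and parks the remainder on the mgmt channel's io_wait_queue, which
 * the assertions below verify.)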
5146 */ 5147 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5148 CU_ASSERT(rc == 0); 5149 CU_ASSERT(g_io_done == false); 5150 5151 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5152 5153 g_abort_done = false; 5154 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5155 5156 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5157 CU_ASSERT(rc == 0); 5158 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5159 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5160 5161 stub_complete_io(1); 5162 CU_ASSERT(g_io_done == true); 5163 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5164 stub_complete_io(3); 5165 CU_ASSERT(g_abort_done == true); 5166 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5167 5168 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5169 5170 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5171 5172 spdk_put_io_channel(io_ch); 5173 spdk_bdev_close(desc); 5174 free_bdev(bdev); 5175 spdk_bdev_finish(bdev_fini_cb, NULL); 5176 poll_threads(); 5177 } 5178 5179 static void 5180 bdev_unmap(void) 5181 { 5182 struct spdk_bdev *bdev; 5183 struct spdk_bdev_desc *desc = NULL; 5184 struct spdk_io_channel *ioch; 5185 struct spdk_bdev_channel *bdev_ch; 5186 struct ut_expected_io *expected_io; 5187 struct spdk_bdev_opts bdev_opts = {}; 5188 uint32_t i, num_outstanding; 5189 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5190 int rc; 5191 5192 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5193 bdev_opts.bdev_io_pool_size = 512; 5194 bdev_opts.bdev_io_cache_size = 64; 5195 rc = spdk_bdev_set_opts(&bdev_opts); 5196 CU_ASSERT(rc == 0); 5197 5198 spdk_bdev_initialize(bdev_init_cb, NULL); 5199 bdev = allocate_bdev("bdev"); 5200 5201 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5202 CU_ASSERT_EQUAL(rc, 0); 5203 SPDK_CU_ASSERT_FATAL(desc != NULL); 5204 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5205 ioch = spdk_bdev_get_io_channel(desc); 5206 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5207 bdev_ch = spdk_io_channel_get_ctx(ioch); 5208 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5209 5210 fn_table.submit_request = stub_submit_request; 5211 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5212 5213 /* Case 1: First test the request won't be split */ 5214 num_blocks = 32; 5215 5216 g_io_done = false; 5217 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5218 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5219 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5220 CU_ASSERT_EQUAL(rc, 0); 5221 CU_ASSERT(g_io_done == false); 5222 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5223 stub_complete_io(1); 5224 CU_ASSERT(g_io_done == true); 5225 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5226 5227 /* Case 2: Test the split with 2 children requests */ 5228 bdev->max_unmap = 8; 5229 bdev->max_unmap_segments = 2; 5230 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5231 num_blocks = max_unmap_blocks * 2; 5232 offset = 0; 5233 5234 g_io_done = false; 5235 for (i = 0; i < 2; i++) { 5236 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5237 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5238 offset += max_unmap_blocks; 5239 } 5240 5241 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5242 CU_ASSERT_EQUAL(rc, 0); 5243 CU_ASSERT(g_io_done == false); 5244 
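	/* (Illustrative arithmetic: with max_unmap = 8 blocks and
	 * max_unmap_segments = 2, each child unmap covers at most
	 * 8 * 2 = 16 blocks, so the 32-block request above splits into
	 * exactly 2 children, as the next assertion verifies.) */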
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5245 stub_complete_io(2); 5246 CU_ASSERT(g_io_done == true); 5247 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5248 5249 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5250 num_children = 15; 5251 num_blocks = max_unmap_blocks * num_children; 5252 g_io_done = false; 5253 offset = 0; 5254 for (i = 0; i < num_children; i++) { 5255 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5256 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5257 offset += max_unmap_blocks; 5258 } 5259 5260 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5261 CU_ASSERT_EQUAL(rc, 0); 5262 CU_ASSERT(g_io_done == false); 5263 5264 while (num_children > 0) { 5265 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5266 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5267 stub_complete_io(num_outstanding); 5268 num_children -= num_outstanding; 5269 } 5270 CU_ASSERT(g_io_done == true); 5271 5272 spdk_put_io_channel(ioch); 5273 spdk_bdev_close(desc); 5274 free_bdev(bdev); 5275 spdk_bdev_finish(bdev_fini_cb, NULL); 5276 poll_threads(); 5277 } 5278 5279 static void 5280 bdev_write_zeroes_split_test(void) 5281 { 5282 struct spdk_bdev *bdev; 5283 struct spdk_bdev_desc *desc = NULL; 5284 struct spdk_io_channel *ioch; 5285 struct spdk_bdev_channel *bdev_ch; 5286 struct ut_expected_io *expected_io; 5287 struct spdk_bdev_opts bdev_opts = {}; 5288 uint32_t i, num_outstanding; 5289 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5290 int rc; 5291 5292 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5293 bdev_opts.bdev_io_pool_size = 512; 5294 bdev_opts.bdev_io_cache_size = 64; 5295 rc = spdk_bdev_set_opts(&bdev_opts); 5296 CU_ASSERT(rc == 0); 5297 5298 spdk_bdev_initialize(bdev_init_cb, NULL); 5299 bdev = allocate_bdev("bdev"); 5300 5301 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5302 CU_ASSERT_EQUAL(rc, 0); 5303 SPDK_CU_ASSERT_FATAL(desc != NULL); 5304 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5305 ioch = spdk_bdev_get_io_channel(desc); 5306 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5307 bdev_ch = spdk_io_channel_get_ctx(ioch); 5308 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5309 5310 fn_table.submit_request = stub_submit_request; 5311 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5312 5313 /* Case 1: First test the request won't be split */ 5314 num_blocks = 32; 5315 5316 g_io_done = false; 5317 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5318 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5319 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5320 CU_ASSERT_EQUAL(rc, 0); 5321 CU_ASSERT(g_io_done == false); 5322 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5323 stub_complete_io(1); 5324 CU_ASSERT(g_io_done == true); 5325 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5326 5327 /* Case 2: Test the split with 2 children requests */ 5328 max_write_zeroes_blocks = 8; 5329 bdev->max_write_zeroes = max_write_zeroes_blocks; 5330 num_blocks = max_write_zeroes_blocks * 2; 5331 offset = 0; 5332 5333 g_io_done = false; 5334 for (i = 0; i < 2; i++) { 5335 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5336 0); 5337 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, 
expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 child requests; the first 8 complete
	 * before the rest are submitted */
	num_children = 15;
	num_blocks = max_write_zeroes_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset,
						   max_write_zeroes_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_set_options_test(void)
{
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	/* Case 1: opts_size is not set */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.small_buf_pool_size = 4;

	/* Case 2: Set an invalid (too small) small_buf_pool_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 3: Set an invalid (too small) large_buf_pool_size */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 4: Set a valid large_buf_pool_size */
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	/* Case 5: Set different valid values for the small and large buf pools */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
}

static uint64_t
get_ns_time(void)
{
	int rc;
	struct timespec ts;

	rc = clock_gettime(CLOCK_MONOTONIC, &ts);
	CU_ASSERT(rc == 0);
	return (uint64_t)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
}

static int
rb_tree_get_height(struct spdk_bdev_name *bdev_name)
{
	int h1, h2;

	if (bdev_name == NULL) {
		return -1;
	} else {
		h1 = rb_tree_get_height(RB_LEFT(bdev_name, node));
		h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node));

		return spdk_max(h1, h2) + 1;
	}
}

static void
bdev_multi_allocation(void)
{
	const int max_bdev_num = 1024 * 16;
	char name[max_bdev_num][16];
	char noexist_name[] = "invalid_bdev";
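	/* (Note on the assertion in the loop below: registered names are kept
	 * in a balanced tree, so the height reported by rb_tree_get_height()
	 * is expected to stay logarithmic in the number of registered bdevs
	 * rather than grow linearly.) */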
5449 struct spdk_bdev *bdev[max_bdev_num]; 5450 int i, j; 5451 uint64_t last_time; 5452 int bdev_num; 5453 int height; 5454 5455 for (j = 0; j < max_bdev_num; j++) { 5456 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 5457 } 5458 5459 for (i = 0; i < 16; i++) { 5460 last_time = get_ns_time(); 5461 bdev_num = 1024 * (i + 1); 5462 for (j = 0; j < bdev_num; j++) { 5463 bdev[j] = allocate_bdev(name[j]); 5464 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 5465 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 5466 } 5467 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 5468 (get_ns_time() - last_time) / 1000 / 1000); 5469 for (j = 0; j < bdev_num; j++) { 5470 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 5471 } 5472 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 5473 5474 for (j = 0; j < bdev_num; j++) { 5475 free_bdev(bdev[j]); 5476 } 5477 for (j = 0; j < bdev_num; j++) { 5478 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 5479 } 5480 } 5481 } 5482 5483 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5484 5485 static int 5486 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5487 int array_size) 5488 { 5489 if (array_size > 0 && domains) { 5490 domains[0] = g_bdev_memory_domain; 5491 } 5492 5493 return 1; 5494 } 5495 5496 static void 5497 bdev_get_memory_domains(void) 5498 { 5499 struct spdk_bdev_fn_table fn_table = { 5500 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5501 }; 5502 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5503 struct spdk_memory_domain *domains[2] = {}; 5504 int rc; 5505 5506 /* bdev is NULL */ 5507 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5508 CU_ASSERT(rc == -EINVAL); 5509 5510 /* domains is NULL */ 5511 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5512 CU_ASSERT(rc == 1); 5513 5514 /* array size is 0 */ 5515 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5516 CU_ASSERT(rc == 1); 5517 5518 /* get_supported_dma_device_types op is set */ 5519 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5520 CU_ASSERT(rc == 1); 5521 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5522 5523 /* get_supported_dma_device_types op is not set */ 5524 fn_table.get_memory_domains = NULL; 5525 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5526 CU_ASSERT(rc == 0); 5527 } 5528 5529 static void 5530 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5531 { 5532 struct spdk_bdev *bdev; 5533 struct spdk_bdev_desc *desc = NULL; 5534 struct spdk_io_channel *io_ch; 5535 char io_buf[512]; 5536 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5537 struct ut_expected_io *expected_io; 5538 int rc; 5539 5540 spdk_bdev_initialize(bdev_init_cb, NULL); 5541 5542 bdev = allocate_bdev("bdev0"); 5543 bdev->md_interleave = false; 5544 bdev->md_len = 8; 5545 5546 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5547 CU_ASSERT(rc == 0); 5548 SPDK_CU_ASSERT_FATAL(desc != NULL); 5549 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5550 io_ch = spdk_bdev_get_io_channel(desc); 5551 CU_ASSERT(io_ch != NULL); 5552 5553 /* read */ 5554 g_io_done = false; 5555 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5556 if (ext_io_opts) { 5557 expected_io->md_buf = ext_io_opts->metadata; 5558 expected_io->ext_io_opts = ext_io_opts; 5559 } 5560 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5561 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, 
expected_io, link); 5562 5563 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5564 5565 CU_ASSERT(rc == 0); 5566 CU_ASSERT(g_io_done == false); 5567 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5568 stub_complete_io(1); 5569 CU_ASSERT(g_io_done == true); 5570 5571 /* write */ 5572 g_io_done = false; 5573 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5574 if (ext_io_opts) { 5575 expected_io->md_buf = ext_io_opts->metadata; 5576 expected_io->ext_io_opts = ext_io_opts; 5577 } 5578 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5579 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5580 5581 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5582 5583 CU_ASSERT(rc == 0); 5584 CU_ASSERT(g_io_done == false); 5585 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5586 stub_complete_io(1); 5587 CU_ASSERT(g_io_done == true); 5588 5589 spdk_put_io_channel(io_ch); 5590 spdk_bdev_close(desc); 5591 free_bdev(bdev); 5592 spdk_bdev_finish(bdev_fini_cb, NULL); 5593 poll_threads(); 5594 5595 } 5596 5597 static void 5598 bdev_io_ext(void) 5599 { 5600 struct spdk_bdev_ext_io_opts ext_io_opts = { 5601 .metadata = (void *)0xFF000000, 5602 .size = sizeof(ext_io_opts) 5603 }; 5604 5605 _bdev_io_ext(&ext_io_opts); 5606 } 5607 5608 static void 5609 bdev_io_ext_no_opts(void) 5610 { 5611 _bdev_io_ext(NULL); 5612 } 5613 5614 static void 5615 bdev_io_ext_invalid_opts(void) 5616 { 5617 struct spdk_bdev *bdev; 5618 struct spdk_bdev_desc *desc = NULL; 5619 struct spdk_io_channel *io_ch; 5620 char io_buf[512]; 5621 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5622 struct spdk_bdev_ext_io_opts ext_io_opts = { 5623 .metadata = (void *)0xFF000000, 5624 .size = sizeof(ext_io_opts) 5625 }; 5626 int rc; 5627 5628 spdk_bdev_initialize(bdev_init_cb, NULL); 5629 5630 bdev = allocate_bdev("bdev0"); 5631 bdev->md_interleave = false; 5632 bdev->md_len = 8; 5633 5634 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5635 CU_ASSERT(rc == 0); 5636 SPDK_CU_ASSERT_FATAL(desc != NULL); 5637 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5638 io_ch = spdk_bdev_get_io_channel(desc); 5639 CU_ASSERT(io_ch != NULL); 5640 5641 /* Test invalid ext_opts size */ 5642 ext_io_opts.size = 0; 5643 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5644 CU_ASSERT(rc == -EINVAL); 5645 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5646 CU_ASSERT(rc == -EINVAL); 5647 5648 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5649 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5650 CU_ASSERT(rc == -EINVAL); 5651 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5652 CU_ASSERT(rc == -EINVAL); 5653 5654 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5655 sizeof(ext_io_opts.metadata) - 1; 5656 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5657 CU_ASSERT(rc == -EINVAL); 5658 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5659 CU_ASSERT(rc == -EINVAL); 5660 5661 spdk_put_io_channel(io_ch); 5662 spdk_bdev_close(desc); 5663 free_bdev(bdev); 5664 spdk_bdev_finish(bdev_fini_cb, NULL); 5665 poll_threads(); 5666 } 5667 5668 static void 5669 bdev_io_ext_split(void) 5670 { 5671 struct 
spdk_bdev *bdev; 5672 struct spdk_bdev_desc *desc = NULL; 5673 struct spdk_io_channel *io_ch; 5674 char io_buf[512]; 5675 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5676 struct ut_expected_io *expected_io; 5677 struct spdk_bdev_ext_io_opts ext_io_opts = { 5678 .metadata = (void *)0xFF000000, 5679 .size = sizeof(ext_io_opts) 5680 }; 5681 int rc; 5682 5683 spdk_bdev_initialize(bdev_init_cb, NULL); 5684 5685 bdev = allocate_bdev("bdev0"); 5686 bdev->md_interleave = false; 5687 bdev->md_len = 8; 5688 5689 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5690 CU_ASSERT(rc == 0); 5691 SPDK_CU_ASSERT_FATAL(desc != NULL); 5692 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5693 io_ch = spdk_bdev_get_io_channel(desc); 5694 CU_ASSERT(io_ch != NULL); 5695 5696 /* Check that IO request with ext_opts and metadata is split correctly 5697 * Offset 14, length 8, payload 0xF000 5698 * Child - Offset 14, length 2, payload 0xF000 5699 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5700 */ 5701 bdev->optimal_io_boundary = 16; 5702 bdev->split_on_optimal_io_boundary = true; 5703 bdev->md_interleave = false; 5704 bdev->md_len = 8; 5705 5706 iov.iov_base = (void *)0xF000; 5707 iov.iov_len = 4096; 5708 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5709 ext_io_opts.metadata = (void *)0xFF000000; 5710 ext_io_opts.size = sizeof(ext_io_opts); 5711 g_io_done = false; 5712 5713 /* read */ 5714 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5715 expected_io->md_buf = ext_io_opts.metadata; 5716 expected_io->ext_io_opts = &ext_io_opts; 5717 expected_io->copy_opts = true; 5718 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5719 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5720 5721 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5722 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5723 expected_io->ext_io_opts = &ext_io_opts; 5724 expected_io->copy_opts = true; 5725 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5726 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5727 5728 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5729 CU_ASSERT(rc == 0); 5730 CU_ASSERT(g_io_done == false); 5731 5732 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5733 stub_complete_io(2); 5734 CU_ASSERT(g_io_done == true); 5735 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5736 5737 /* write */ 5738 g_io_done = false; 5739 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5740 expected_io->md_buf = ext_io_opts.metadata; 5741 expected_io->ext_io_opts = &ext_io_opts; 5742 expected_io->copy_opts = true; 5743 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5744 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5745 5746 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5747 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5748 expected_io->ext_io_opts = &ext_io_opts; 5749 expected_io->copy_opts = true; 5750 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5751 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5752 5753 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5754 CU_ASSERT(rc == 0); 5755 CU_ASSERT(g_io_done == false); 5756 5757 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5758 stub_complete_io(2); 5759 
CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_ext_bounce_buffer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts)
	};
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Verify data pull/push. The bdev doesn't support memory domains,
	 * so buffers from the bdev memory pool will be used. */
	ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;

	/* read */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	expected_io->ext_io_opts = &ext_io_opts;
	expected_io->copy_opts = true;
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_io_done == true);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	expected_io->ext_io_opts = &ext_io_opts;
	expected_io->copy_opts = true;
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_register_uuid_alias(void)
{
	struct spdk_bdev *bdev, *second;
	char uuid[SPDK_UUID_STRING_LEN];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev0");

	/* Make sure a UUID was generated */
	CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid)));

	/* Check that a UUID alias was registered */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
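	/* Unregistration completes asynchronously on the event thread;
	 * poll_threads() above drives that deferred work, after which the
	 * UUID alias should be gone. */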
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check the same, but this time register the bdev with a non-zero UUID */
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Register the bdev using the UUID as the name */
	bdev->name = uuid;
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check that it's not possible to register two bdevs with the same UUID */
	bdev->name = "bdev0";
	second = allocate_bdev("bdev1");
	spdk_uuid_copy(&bdev->uuid, &second->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, -EEXIST);

	/* Regenerate the UUID and re-check */
	spdk_uuid_generate(&bdev->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);

	/* And check that both bdevs can be retrieved through their UUIDs */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);

	free_bdev(second);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_unregister_by_name(void)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = allocate_bdev("bdev");

	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == 0);

	/* Check that the unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	poll_threads();

	/* The event callback shall not be issued because the device was closed */
	CU_ASSERT(g_event_type1 == 0xFF);
	/* The unregister callback is issued */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
}

static int
count_bdevs(void *ctx, struct spdk_bdev *bdev)
{
	int *count = ctx;

	(*count)++;

	return 0;
}

static void
for_each_bdev_test(void)
{
	struct spdk_bdev *bdev[8];
	int rc, count;

	bdev[0] = allocate_bdev("bdev0");
	bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING;

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_bdev("bdev4");

	bdev[5] = allocate_bdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL,
&bdev_ut_if); 5978 CU_ASSERT(rc == 0); 5979 5980 bdev[6] = allocate_bdev("bdev6"); 5981 5982 bdev[7] = allocate_bdev("bdev7"); 5983 5984 count = 0; 5985 rc = spdk_for_each_bdev(&count, count_bdevs); 5986 CU_ASSERT(rc == 0); 5987 CU_ASSERT(count == 7); 5988 5989 count = 0; 5990 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5991 CU_ASSERT(rc == 0); 5992 CU_ASSERT(count == 4); 5993 5994 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 5995 free_bdev(bdev[0]); 5996 free_bdev(bdev[1]); 5997 free_bdev(bdev[2]); 5998 free_bdev(bdev[3]); 5999 free_bdev(bdev[4]); 6000 free_bdev(bdev[5]); 6001 free_bdev(bdev[6]); 6002 free_bdev(bdev[7]); 6003 } 6004 6005 static void 6006 bdev_seek_test(void) 6007 { 6008 struct spdk_bdev *bdev; 6009 struct spdk_bdev_desc *desc = NULL; 6010 struct spdk_io_channel *io_ch; 6011 int rc; 6012 6013 spdk_bdev_initialize(bdev_init_cb, NULL); 6014 poll_threads(); 6015 6016 bdev = allocate_bdev("bdev0"); 6017 6018 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6019 CU_ASSERT(rc == 0); 6020 poll_threads(); 6021 SPDK_CU_ASSERT_FATAL(desc != NULL); 6022 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6023 io_ch = spdk_bdev_get_io_channel(desc); 6024 CU_ASSERT(io_ch != NULL); 6025 6026 /* Seek data not supported */ 6027 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6028 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6029 CU_ASSERT(rc == 0); 6030 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6031 poll_threads(); 6032 CU_ASSERT(g_seek_offset == 0); 6033 6034 /* Seek hole not supported */ 6035 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6036 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6037 CU_ASSERT(rc == 0); 6038 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6039 poll_threads(); 6040 CU_ASSERT(g_seek_offset == UINT64_MAX); 6041 6042 /* Seek data supported */ 6043 g_seek_data_offset = 12345; 6044 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6045 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6046 CU_ASSERT(rc == 0); 6047 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6048 stub_complete_io(1); 6049 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6050 CU_ASSERT(g_seek_offset == 12345); 6051 6052 /* Seek hole supported */ 6053 g_seek_hole_offset = 67890; 6054 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6055 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6056 CU_ASSERT(rc == 0); 6057 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6058 stub_complete_io(1); 6059 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6060 CU_ASSERT(g_seek_offset == 67890); 6061 6062 spdk_put_io_channel(io_ch); 6063 spdk_bdev_close(desc); 6064 free_bdev(bdev); 6065 spdk_bdev_finish(bdev_fini_cb, NULL); 6066 poll_threads(); 6067 } 6068 6069 static void 6070 bdev_copy(void) 6071 { 6072 struct spdk_bdev *bdev; 6073 struct spdk_bdev_desc *desc = NULL; 6074 struct spdk_io_channel *ioch; 6075 struct ut_expected_io *expected_io; 6076 uint64_t src_offset, num_blocks; 6077 uint32_t num_completed; 6078 int rc; 6079 6080 spdk_bdev_initialize(bdev_init_cb, NULL); 6081 bdev = allocate_bdev("bdev"); 6082 6083 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6084 CU_ASSERT_EQUAL(rc, 0); 6085 SPDK_CU_ASSERT_FATAL(desc != NULL); 6086 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6087 ioch = spdk_bdev_get_io_channel(desc); 6088 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6089 6090 fn_table.submit_request = 
stub_submit_request; 6091 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6092 6093 /* First test that if the bdev supports copy, the request won't be split */ 6094 bdev->md_len = 0; 6095 bdev->blocklen = 4096; 6096 num_blocks = 512; 6097 src_offset = bdev->blockcnt - num_blocks; 6098 6099 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6100 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6101 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6102 CU_ASSERT_EQUAL(rc, 0); 6103 num_completed = stub_complete_io(1); 6104 CU_ASSERT_EQUAL(num_completed, 1); 6105 6106 /* Check that if copy is not supported it'll fail */ 6107 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6108 6109 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6110 CU_ASSERT_EQUAL(rc, -ENOTSUP); 6111 6112 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6113 spdk_put_io_channel(ioch); 6114 spdk_bdev_close(desc); 6115 free_bdev(bdev); 6116 spdk_bdev_finish(bdev_fini_cb, NULL); 6117 poll_threads(); 6118 } 6119 6120 static void 6121 bdev_copy_split_test(void) 6122 { 6123 struct spdk_bdev *bdev; 6124 struct spdk_bdev_desc *desc = NULL; 6125 struct spdk_io_channel *ioch; 6126 struct spdk_bdev_channel *bdev_ch; 6127 struct ut_expected_io *expected_io; 6128 struct spdk_bdev_opts bdev_opts = {}; 6129 uint32_t i, num_outstanding; 6130 uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children; 6131 int rc; 6132 6133 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 6134 bdev_opts.bdev_io_pool_size = 512; 6135 bdev_opts.bdev_io_cache_size = 64; 6136 rc = spdk_bdev_set_opts(&bdev_opts); 6137 CU_ASSERT(rc == 0); 6138 6139 spdk_bdev_initialize(bdev_init_cb, NULL); 6140 bdev = allocate_bdev("bdev"); 6141 6142 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6143 CU_ASSERT_EQUAL(rc, 0); 6144 SPDK_CU_ASSERT_FATAL(desc != NULL); 6145 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6146 ioch = spdk_bdev_get_io_channel(desc); 6147 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6148 bdev_ch = spdk_io_channel_get_ctx(ioch); 6149 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 6150 6151 fn_table.submit_request = stub_submit_request; 6152 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6153 6154 /* Case 1: First test the request won't be split */ 6155 num_blocks = 32; 6156 src_offset = bdev->blockcnt - num_blocks; 6157 6158 g_io_done = false; 6159 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6160 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6161 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6162 CU_ASSERT_EQUAL(rc, 0); 6163 CU_ASSERT(g_io_done == false); 6164 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6165 stub_complete_io(1); 6166 CU_ASSERT(g_io_done == true); 6167 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6168 6169 /* Case 2: Test the split with 2 children requests */ 6170 max_copy_blocks = 8; 6171 bdev->max_copy = max_copy_blocks; 6172 num_children = 2; 6173 num_blocks = max_copy_blocks * num_children; 6174 offset = 0; 6175 src_offset = bdev->blockcnt - num_blocks; 6176 6177 g_io_done = false; 6178 for (i = 0; i < num_children; i++) { 6179 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6180 src_offset + offset, max_copy_blocks); 6181 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6182 offset += 
max_copy_blocks; 6183 } 6184 6185 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6186 CU_ASSERT_EQUAL(rc, 0); 6187 CU_ASSERT(g_io_done == false); 6188 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children); 6189 stub_complete_io(num_children); 6190 CU_ASSERT(g_io_done == true); 6191 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6192 6193 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 6194 num_children = 15; 6195 num_blocks = max_copy_blocks * num_children; 6196 offset = 0; 6197 src_offset = bdev->blockcnt - num_blocks; 6198 6199 g_io_done = false; 6200 for (i = 0; i < num_children; i++) { 6201 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6202 src_offset + offset, max_copy_blocks); 6203 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6204 offset += max_copy_blocks; 6205 } 6206 6207 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6208 CU_ASSERT_EQUAL(rc, 0); 6209 CU_ASSERT(g_io_done == false); 6210 6211 while (num_children > 0) { 6212 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6213 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6214 stub_complete_io(num_outstanding); 6215 num_children -= num_outstanding; 6216 } 6217 CU_ASSERT(g_io_done == true); 6218 6219 spdk_put_io_channel(ioch); 6220 spdk_bdev_close(desc); 6221 free_bdev(bdev); 6222 spdk_bdev_finish(bdev_fini_cb, NULL); 6223 poll_threads(); 6224 } 6225 6226 int 6227 main(int argc, char **argv) 6228 { 6229 CU_pSuite suite = NULL; 6230 unsigned int num_failures; 6231 6232 CU_set_error_action(CUEA_ABORT); 6233 CU_initialize_registry(); 6234 6235 suite = CU_add_suite("bdev", null_init, null_clean); 6236 6237 CU_ADD_TEST(suite, bytes_to_blocks_test); 6238 CU_ADD_TEST(suite, num_blocks_test); 6239 CU_ADD_TEST(suite, io_valid_test); 6240 CU_ADD_TEST(suite, open_write_test); 6241 CU_ADD_TEST(suite, claim_test); 6242 CU_ADD_TEST(suite, alias_add_del_test); 6243 CU_ADD_TEST(suite, get_device_stat_test); 6244 CU_ADD_TEST(suite, bdev_io_types_test); 6245 CU_ADD_TEST(suite, bdev_io_wait_test); 6246 CU_ADD_TEST(suite, bdev_io_spans_split_test); 6247 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 6248 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 6249 CU_ADD_TEST(suite, bdev_io_mix_split_test); 6250 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 6251 CU_ADD_TEST(suite, bdev_io_write_unit_split_test); 6252 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 6253 CU_ADD_TEST(suite, bdev_io_alignment); 6254 CU_ADD_TEST(suite, bdev_histograms); 6255 CU_ADD_TEST(suite, bdev_write_zeroes); 6256 CU_ADD_TEST(suite, bdev_compare_and_write); 6257 CU_ADD_TEST(suite, bdev_compare); 6258 CU_ADD_TEST(suite, bdev_compare_emulated); 6259 CU_ADD_TEST(suite, bdev_zcopy_write); 6260 CU_ADD_TEST(suite, bdev_zcopy_read); 6261 CU_ADD_TEST(suite, bdev_open_while_hotremove); 6262 CU_ADD_TEST(suite, bdev_close_while_hotremove); 6263 CU_ADD_TEST(suite, bdev_open_ext); 6264 CU_ADD_TEST(suite, bdev_open_ext_unregister); 6265 CU_ADD_TEST(suite, bdev_set_io_timeout); 6266 CU_ADD_TEST(suite, bdev_set_qd_sampling); 6267 CU_ADD_TEST(suite, lba_range_overlap); 6268 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 6269 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 6270 CU_ADD_TEST(suite, lock_lba_range_overlapped); 6271 CU_ADD_TEST(suite, bdev_io_abort); 6272 CU_ADD_TEST(suite, bdev_unmap); 6273 CU_ADD_TEST(suite, 
bdev_write_zeroes_split_test); 6274 CU_ADD_TEST(suite, bdev_set_options_test); 6275 CU_ADD_TEST(suite, bdev_multi_allocation); 6276 CU_ADD_TEST(suite, bdev_get_memory_domains); 6277 CU_ADD_TEST(suite, bdev_io_ext); 6278 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 6279 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 6280 CU_ADD_TEST(suite, bdev_io_ext_split); 6281 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 6282 CU_ADD_TEST(suite, bdev_register_uuid_alias); 6283 CU_ADD_TEST(suite, bdev_unregister_by_name); 6284 CU_ADD_TEST(suite, for_each_bdev_test); 6285 CU_ADD_TEST(suite, bdev_seek_test); 6286 CU_ADD_TEST(suite, bdev_copy); 6287 CU_ADD_TEST(suite, bdev_copy_split_test); 6288 6289 allocate_cores(1); 6290 allocate_threads(1); 6291 set_thread(0); 6292 6293 CU_basic_set_mode(CU_BRM_VERBOSE); 6294 CU_basic_run_tests(); 6295 num_failures = CU_get_number_of_failures(); 6296 CU_cleanup_registry(); 6297 6298 free_threads(); 6299 free_cores(); 6300 6301 return num_failures; 6302 } 6303
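/* Adding a test to this suite follows the pattern used throughout this file.
 * A minimal sketch (the test name "bdev_example_test" is hypothetical):
 *
 *	static void
 *	bdev_example_test(void)
 *	{
 *		struct spdk_bdev *bdev;
 *
 *		spdk_bdev_initialize(bdev_init_cb, NULL);
 *		bdev = allocate_bdev("bdev0");
 *
 *		// Exercise the bdev layer here and CU_ASSERT() on the results.
 *
 *		free_bdev(bdev);
 *		spdk_bdev_finish(bdev_fini_cb, NULL);
 *		poll_threads();
 *	}
 *
 * plus a matching CU_ADD_TEST(suite, bdev_example_test); registration before
 * CU_basic_run_tests() in main().
 */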