/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
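
/* A note on the mocks above: the DEFINE_RETURN_MOCK()/HANDLE_RETURN_MOCK() pair
 * lets an individual test force either function to fail before its completion
 * callback runs. A minimal sketch, assuming the MOCK_SET()/MOCK_CLEAR() helpers
 * from SPDK's spdk_internal/mock.h:
 *
 *	MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM);
 *	... submit an I/O that requires a pull; it now sees -ENOMEM ...
 *	MOCK_CLEAR(spdk_memory_domain_pull_data);
 */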

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	struct spdk_bdev_ext_io_opts *ext_io_opts;
	bool copy_opts;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of a write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of a read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
		if (bdev_io->u.bdev.ext_opts) {
			CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata);
		}
	}

	if (expected_io->copy_opts) {
		if (expected_io->ext_io_opts) {
			/* opts are not NULL, so they should have been copied */
			CU_ASSERT(expected_io->ext_io_opts != bdev_io->u.bdev.ext_opts);
			CU_ASSERT(bdev_io->u.bdev.ext_opts == &bdev_io->internal.ext_opts_copy);
			/* internal opts always points to the opts passed in */
			CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
		} else {
			/* passed opts was NULL, so we expect bdev_io opts to be NULL */
			CU_ASSERT(bdev_io->u.bdev.ext_opts == NULL);
		}
	} else {
		/* opts were not copied, so they should be equal */
		CU_ASSERT(expected_io->ext_io_opts == bdev_io->u.bdev.ext_opts);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = bdev_io->internal.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}
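
/* A minimal usage sketch of the expected-I/O framework above (hypothetical and
 * compiled out; the desc/io_ch parameters are assumed to come from an opened
 * bdev, and io_done() is the completion callback defined later in this file).
 * The pattern every test below follows is: queue an expectation, issue the
 * bdev call, then drain the stub channel.
 */
#if 0
static void
example_expected_io_usage(struct spdk_bdev_desc *desc, struct spdk_io_channel *io_ch)
{
	struct ut_expected_io *expected_io;

	/* Expect one READ of 8 blocks at offset 14, carried in a single iovec. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Submit the read; stub_submit_request() checks it against the expectation. */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* Complete the outstanding I/O, which invokes io_done(). */
	CU_ASSERT(stub_complete_io(1) == 1);
}
#endif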

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs. This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7). This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs. One of its base
	 * bdevs (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.
	 * This models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *  bdev4   bdev5   bdev6   bdev7
	 *    |       |       |       |
	 *  +---+---+ +---+   +   +---+---+
	 *  |       |  \  |  /         \
	 * bdev0  bdev1  bdev2        bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write. This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write. This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write. This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write. This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only. This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if a desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
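
/* Worked arithmetic for the non-power-of-two case above: with blocklen == 100
 * the helper cannot use shift/mask (that shortcut only applies to power-of-two
 * block sizes) and falls back to division and modulo, so byte offset 100 maps
 * to block 1 (100 / 100) and byte length 200 maps to 2 blocks (200 / 100),
 * while offset or length 3 fails because 3 % 100 != 0.
 */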

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	poll_threads();
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number is not allowed while the bdev is open */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for a closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Create and register bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias equals the bdev name, so it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing aliases from registered bdevs */

	/* Alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove the name instead of an alias should fail; the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	/* The bdev_io pool (size 4, set above) is exhausted, so a fifth read fails with -ENOMEM. */
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
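
/* The test above exercises the standard -ENOMEM retry protocol: when a submit
 * call returns -ENOMEM, the caller fills in a spdk_bdev_io_wait_entry and
 * queues it with spdk_bdev_queue_io_wait(); the cb_fn (io_wait_cb() here) is
 * invoked to resubmit once a bdev_io is returned to the pool. A sketch, with
 * entry/ctx as hypothetical caller state:
 *
 *	if (spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL) == -ENOMEM) {
 *		entry.bdev = bdev;
 *		entry.cb_fn = io_wait_cb;
 *		entry.cb_arg = &ctx;
 *		spdk_bdev_queue_io_wait(bdev, io_ch, &entry);
 *	}
 */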

static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max size set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross a boundary and does not exceed max_size or max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_sizes */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}
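
/* The boundary checks above reduce to: an I/O covering blocks
 * [offset, offset + num) spans a boundary B iff
 * offset / B != (offset + num - 1) / B. With B == 32 as in this test,
 * offset 0 / num 32 touches blocks 0..31 and stays within one stripe
 * (no split), while num 33 reaches block 32 in the next stripe (split).
 */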

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	/* Each child's md_buf is the parent md_buf advanced by (blocks consumed so far) * md_len. */
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test a multi-vector command that needs to be split on the optimal I/O boundary
	 * and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the optimal I/O boundary
	 * and then split further due to the capacity of child iovs. In this case, the
	 * length of the rest of the iovec array within an I/O boundary is a multiple of
	 * the block size.
	 */

	/* Fill the iovec array for exactly one boundary. The iovec count for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split on the optimal I/O boundary
	 * and then split further due to the capacity of child iovs; the child request
	 * offset should be rewound to the last aligned offset and the request should
	 * succeed without error.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi-vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs. In particular, test the case where the command is
	 * split due to the capacity of child iovs and the tail address is not aligned
	 * with the block size, so it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected in a real issue. We change the base addresses but keep the lengths
	 * so as not to lose the credibility of the test.
	 */
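
	/* Sanity arithmetic for the first child below: iov[0..30] contribute
	 * 31 * 1024 = 31744 bytes and iov[31] adds 32768, i.e. 64512 bytes total,
	 * which is exactly 126 blocks. The 32-iovec child capacity is therefore
	 * exhausted two blocks short of the 128-block boundary, giving a first
	 * child of offset 0, length 126, iovcnt 32.
	 */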
1528 */ 1529 bdev->optimal_io_boundary = 128; 1530 g_io_done = false; 1531 g_io_status = 0; 1532 1533 for (i = 0; i < 31; i++) { 1534 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1535 iov[i].iov_len = 1024; 1536 } 1537 iov[31].iov_base = (void *)0xFEED1F00000; 1538 iov[31].iov_len = 32768; 1539 iov[32].iov_base = (void *)0xFEED2000000; 1540 iov[32].iov_len = 160; 1541 iov[33].iov_base = (void *)0xFEED2100000; 1542 iov[33].iov_len = 4096; 1543 iov[34].iov_base = (void *)0xFEED2200000; 1544 iov[34].iov_len = 4096; 1545 iov[35].iov_base = (void *)0xFEED2300000; 1546 iov[35].iov_len = 4096; 1547 iov[36].iov_base = (void *)0xFEED2400000; 1548 iov[36].iov_len = 4096; 1549 iov[37].iov_base = (void *)0xFEED2500000; 1550 iov[37].iov_len = 4096; 1551 iov[38].iov_base = (void *)0xFEED2600000; 1552 iov[38].iov_len = 4096; 1553 iov[39].iov_base = (void *)0xFEED2700000; 1554 iov[39].iov_len = 4096; 1555 iov[40].iov_base = (void *)0xFEED2800000; 1556 iov[40].iov_len = 4096; 1557 iov[41].iov_base = (void *)0xFEED2900000; 1558 iov[41].iov_len = 4096; 1559 iov[42].iov_base = (void *)0xFEED2A00000; 1560 iov[42].iov_len = 4096; 1561 iov[43].iov_base = (void *)0xFEED2B00000; 1562 iov[43].iov_len = 12288; 1563 iov[44].iov_base = (void *)0xFEED2C00000; 1564 iov[44].iov_len = 8192; 1565 iov[45].iov_base = (void *)0xFEED2F00000; 1566 iov[45].iov_len = 4096; 1567 iov[46].iov_base = (void *)0xFEED3000000; 1568 iov[46].iov_len = 4096; 1569 iov[47].iov_base = (void *)0xFEED3100000; 1570 iov[47].iov_len = 4096; 1571 iov[48].iov_base = (void *)0xFEED3200000; 1572 iov[48].iov_len = 24576; 1573 iov[49].iov_base = (void *)0xFEED3300000; 1574 iov[49].iov_len = 16384; 1575 iov[50].iov_base = (void *)0xFEED3400000; 1576 iov[50].iov_len = 12288; 1577 iov[51].iov_base = (void *)0xFEED3500000; 1578 iov[51].iov_len = 4096; 1579 iov[52].iov_base = (void *)0xFEED3600000; 1580 iov[52].iov_len = 4096; 1581 iov[53].iov_base = (void *)0xFEED3700000; 1582 iov[53].iov_len = 4096; 1583 iov[54].iov_base = (void *)0xFEED3800000; 1584 iov[54].iov_len = 28672; 1585 iov[55].iov_base = (void *)0xFEED3900000; 1586 iov[55].iov_len = 20480; 1587 iov[56].iov_base = (void *)0xFEED3A00000; 1588 iov[56].iov_len = 4096; 1589 iov[57].iov_base = (void *)0xFEED3B00000; 1590 iov[57].iov_len = 12288; 1591 iov[58].iov_base = (void *)0xFEED3C00000; 1592 iov[58].iov_len = 4096; 1593 iov[59].iov_base = (void *)0xFEED3D00000; 1594 iov[59].iov_len = 4096; 1595 iov[60].iov_base = (void *)0xFEED3E00000; 1596 iov[60].iov_len = 352; 1597 1598 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1599 * of child iovs, 1600 */ 1601 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1602 expected_io->md_buf = md_buf; 1603 for (i = 0; i < 32; i++) { 1604 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1605 } 1606 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1607 1608 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1609 * split by the IO boundary requirement. 
1610 */ 1611 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1612 expected_io->md_buf = md_buf + 126 * 8; 1613 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1614 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1615 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1616 1617 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1618 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1619 */ 1620 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1621 expected_io->md_buf = md_buf + 128 * 8; 1622 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1623 iov[33].iov_len - 864); 1624 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1625 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1626 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1627 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1628 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1629 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1630 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1631 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1632 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1633 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1634 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1635 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1636 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1637 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1638 1639 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1640 * first 864 bytes of iov[52] split by the IO boundary requirement. 1641 */ 1642 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1643 expected_io->md_buf = md_buf + 256 * 8; 1644 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1645 iov[46].iov_len - 864); 1646 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1647 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1648 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1649 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1650 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1651 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1652 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1653 1654 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1655 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 
1656 */ 1657 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1658 expected_io->md_buf = md_buf + 384 * 8; 1659 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1660 iov[52].iov_len - 864); 1661 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1662 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1663 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1664 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1665 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1666 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1667 1668 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1669 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1670 */ 1671 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1672 expected_io->md_buf = md_buf + 512 * 8; 1673 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1674 iov[57].iov_len - 4960); 1675 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1676 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1677 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1678 1679 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1680 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1681 expected_io->md_buf = md_buf + 542 * 8; 1682 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1683 iov[59].iov_len - 3936); 1684 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1685 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1686 1687 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1688 0, 543, io_done, NULL); 1689 CU_ASSERT(rc == 0); 1690 CU_ASSERT(g_io_done == false); 1691 1692 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1693 stub_complete_io(1); 1694 CU_ASSERT(g_io_done == false); 1695 1696 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1697 stub_complete_io(5); 1698 CU_ASSERT(g_io_done == false); 1699 1700 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1701 stub_complete_io(1); 1702 CU_ASSERT(g_io_done == true); 1703 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1704 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1705 1706 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1707 * split, so test that. 1708 */ 1709 bdev->optimal_io_boundary = 15; 1710 g_io_done = false; 1711 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1712 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1713 1714 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1715 CU_ASSERT(rc == 0); 1716 CU_ASSERT(g_io_done == false); 1717 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1718 stub_complete_io(1); 1719 CU_ASSERT(g_io_done == true); 1720 1721 /* Test an UNMAP. This should also not be split. 
*/ 1722 bdev->optimal_io_boundary = 16; 1723 g_io_done = false; 1724 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1725 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1726 1727 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1728 CU_ASSERT(rc == 0); 1729 CU_ASSERT(g_io_done == false); 1730 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1731 stub_complete_io(1); 1732 CU_ASSERT(g_io_done == true); 1733 1734 /* Test a FLUSH. This should also not be split. */ 1735 bdev->optimal_io_boundary = 16; 1736 g_io_done = false; 1737 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1738 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1739 1740 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1741 CU_ASSERT(rc == 0); 1742 CU_ASSERT(g_io_done == false); 1743 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1744 stub_complete_io(1); 1745 CU_ASSERT(g_io_done == true); 1746 1747 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1748 1749 /* Child requests return an error status */ 1750 bdev->optimal_io_boundary = 16; 1751 iov[0].iov_base = (void *)0x10000; 1752 iov[0].iov_len = 512 * 64; 1753 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1754 g_io_done = false; 1755 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1756 1757 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1758 CU_ASSERT(rc == 0); 1759 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1760 stub_complete_io(4); 1761 CU_ASSERT(g_io_done == false); 1762 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1763 stub_complete_io(1); 1764 CU_ASSERT(g_io_done == true); 1765 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1766 1767 /* Test that a multi-vector command is terminated with failure, without continuing the 1768 * splitting process, when one of its child I/Os fails. 1769 * The multi-vector command is the same as the one above: it needs to be split on the 1770 * I/O boundary and then split further due to the capacity of child iovs. 1771 */ 1772 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1773 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1774 iov[i].iov_len = 512; 1775 } 1776 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1777 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1778 1779 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1780 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1781 1782 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1783 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1784 1785 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1786 1787 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1788 g_io_done = false; 1789 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1790 1791 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 1792 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1793 CU_ASSERT(rc == 0); 1794 CU_ASSERT(g_io_done == false); 1795 1796 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1797 stub_complete_io(1); 1798 CU_ASSERT(g_io_done == true); 1799 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1800 1801 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1802 1803 /* For this test we will create the following conditions to hit the code path where 1804 * we are trying to send an IO following a split that has no iovs because we had to 1805 * trim them for alignment reasons.
1806 * 1807 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1808 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1809 * position 30 and overshoot by 0x2e. 1810 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1811 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e, 1812 * which eliminates that vector, so we just send the first split IO with 31 vectors 1813 * and let the completion pick up the last 2 vectors. 1814 */ 1815 bdev->optimal_io_boundary = 32; 1816 bdev->split_on_optimal_io_boundary = true; 1817 g_io_done = false; 1818 1819 /* Init all parent IOVs to 0x212 */ 1820 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1821 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1822 iov[i].iov_len = 0x212; 1823 } 1824 1825 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1826 BDEV_IO_NUM_CHILD_IOV - 1); 1827 /* expect 0-29 to be 1:1 with the parent iov */ 1828 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1829 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1830 } 1831 1832 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment 1833 * where 0x2e is the amount we overshot the 16K boundary 1834 */ 1835 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1836 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1837 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1838 1839 /* 2nd child IO will have 2 remaining vectors, one to pick up the remainder of the 1840 * vector that was shortened, taking it to the next boundary, and then a final one to 1841 * get us to 0x4200 bytes for the IO. 1842 */ 1843 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1844 BDEV_IO_NUM_CHILD_IOV, 2); 1845 /* position 30 picked up the remaining bytes to the next boundary */ 1846 ut_expected_io_set_iov(expected_io, 0, 1847 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1848 1849 /* position 31 picked up the rest of the transfer to get us to 0x4200 */ 1850 ut_expected_io_set_iov(expected_io, 1, 1851 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1852 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1853 1854 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1855 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1856 CU_ASSERT(rc == 0); 1857 CU_ASSERT(g_io_done == false); 1858 1859 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1860 stub_complete_io(1); 1861 CU_ASSERT(g_io_done == false); 1862 1863 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1864 stub_complete_io(1); 1865 CU_ASSERT(g_io_done == true); 1866 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1867 1868 spdk_put_io_channel(io_ch); 1869 spdk_bdev_close(desc); 1870 free_bdev(bdev); 1871 spdk_bdev_finish(bdev_fini_cb, NULL); 1872 poll_threads(); 1873 } 1874 1875 static void 1876 bdev_io_max_size_and_segment_split_test(void) 1877 { 1878 struct spdk_bdev *bdev; 1879 struct spdk_bdev_desc *desc = NULL; 1880 struct spdk_io_channel *io_ch; 1881 struct spdk_bdev_opts bdev_opts = {}; 1882 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1883 struct ut_expected_io *expected_io; 1884 uint64_t i; 1885 int rc; 1886 1887 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1888 bdev_opts.bdev_io_pool_size = 512; 1889 bdev_opts.bdev_io_cache_size = 64; 1890 1891 bdev_opts.opts_size = sizeof(bdev_opts); 1892 rc =
spdk_bdev_set_opts(&bdev_opts); 1893 CU_ASSERT(rc == 0); 1894 spdk_bdev_initialize(bdev_init_cb, NULL); 1895 1896 bdev = allocate_bdev("bdev0"); 1897 1898 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 1899 CU_ASSERT(rc == 0); 1900 SPDK_CU_ASSERT_FATAL(desc != NULL); 1901 io_ch = spdk_bdev_get_io_channel(desc); 1902 CU_ASSERT(io_ch != NULL); 1903 1904 bdev->split_on_optimal_io_boundary = false; 1905 bdev->optimal_io_boundary = 0; 1906 1907 /* Case 0: max_num_segments == 0 (no limit on the segment count), 1908 * but the segment size (2 * 512) exceeds max_segment_size (512). 1909 */ 1910 bdev->max_segment_size = 512; 1911 bdev->max_num_segments = 0; 1912 g_io_done = false; 1913 1914 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 1915 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1916 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 1917 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1918 1919 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1920 CU_ASSERT(rc == 0); 1921 CU_ASSERT(g_io_done == false); 1922 1923 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1924 stub_complete_io(1); 1925 CU_ASSERT(g_io_done == true); 1926 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1927 1928 /* Case 1: max_segment_size == 0 (no limit on the segment size), 1929 * but the iov count (2) exceeds max_num_segments (1). 1930 */ 1931 bdev->max_segment_size = 0; 1932 bdev->max_num_segments = 1; 1933 g_io_done = false; 1934 1935 iov[0].iov_base = (void *)0x10000; 1936 iov[0].iov_len = 512; 1937 iov[1].iov_base = (void *)0x20000; 1938 iov[1].iov_len = 8 * 512; 1939 1940 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1941 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 1942 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1943 1944 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 1945 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 1946 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1947 1948 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 1949 CU_ASSERT(rc == 0); 1950 CU_ASSERT(g_io_done == false); 1951 1952 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1953 stub_complete_io(2); 1954 CU_ASSERT(g_io_done == true); 1955 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1956 1957 /* Test that a non-vector command is split correctly. 1958 * Set up the expected values before calling spdk_bdev_read_blocks. 1959 */ 1960 bdev->max_segment_size = 512; 1961 bdev->max_num_segments = 1; 1962 g_io_done = false; 1963 1964 /* Child IO 0 */ 1965 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1966 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1967 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1968 1969 /* Child IO 1 */ 1970 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 1971 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 1972 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1973 1974 /* spdk_bdev_read_blocks will submit both children immediately.
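 * The split loop builds and submits every child it has resources for in one pass, which is why outstanding_io_count is 2 right after the call.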
*/ 1975 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1976 CU_ASSERT(rc == 0); 1977 CU_ASSERT(g_io_done == false); 1978 1979 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1980 stub_complete_io(2); 1981 CU_ASSERT(g_io_done == true); 1982 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1983 1984 /* Now set up a more complex, multi-vector command that needs to be split, 1985 * including splitting iovecs. 1986 */ 1987 bdev->max_segment_size = 2 * 512; 1988 bdev->max_num_segments = 1; 1989 g_io_done = false; 1990 1991 iov[0].iov_base = (void *)0x10000; 1992 iov[0].iov_len = 2 * 512; 1993 iov[1].iov_base = (void *)0x20000; 1994 iov[1].iov_len = 4 * 512; 1995 iov[2].iov_base = (void *)0x30000; 1996 iov[2].iov_len = 6 * 512; 1997 1998 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 1999 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2000 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2001 2002 /* Split iov[1] into 2 segment entries, each of which becomes its own child IO */ 2003 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2004 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2005 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2006 2007 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2008 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2009 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2010 2011 /* Split iov[2] into 3 segment entries, each of which becomes its own child IO */ 2012 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2013 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2014 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2015 2016 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2017 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2018 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2019 2020 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2021 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2022 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2023 2024 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2025 CU_ASSERT(rc == 0); 2026 CU_ASSERT(g_io_done == false); 2027 2028 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2029 stub_complete_io(6); 2030 CU_ASSERT(g_io_done == true); 2031 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2032 2033 /* Test a multi-vector command that needs to be split by segment size and then needs 2034 * to be split further due to the capacity of the parent IO's child iovs. 2035 */ 2036 bdev->max_segment_size = 512; 2037 bdev->max_num_segments = 1; 2038 g_io_done = false; 2039 2040 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2041 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2042 iov[i].iov_len = 512 * 2; 2043 } 2044 2045 /* Each input iov is split into 2 entries, so 2046 * half of the input iovs can fill all child iov entries of a single split round.
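 * In numbers: each parent iov is 2 * 512 bytes, so half of them (16 when BDEV_IO_NUM_CHILD_IOV is 32) yield 32 single-block children per round and the request completes in two rounds of BDEV_IO_NUM_CHILD_IOV child IOs.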
2047 */ 2048 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2049 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2050 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2051 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2052 2053 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2054 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2055 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2056 } 2057 2058 /* The remaining iovs are split in the second round */ 2059 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2060 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2061 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2062 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2063 2064 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2065 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2066 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2067 } 2068 2069 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2070 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2071 CU_ASSERT(rc == 0); 2072 CU_ASSERT(g_io_done == false); 2073 2074 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2075 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2076 CU_ASSERT(g_io_done == false); 2077 2078 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2079 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2080 CU_ASSERT(g_io_done == true); 2081 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2082 2083 /* An error case: a divided child IO does 2084 * not meet the multiple-of-block-size requirement, 2085 * so the request exits with an error. 2086 */ 2087 bdev->max_segment_size = 512; 2088 bdev->max_num_segments = 1; 2089 g_io_done = false; 2090 2091 iov[0].iov_base = (void *)0x10000; 2092 iov[0].iov_len = 512 + 256; 2093 iov[1].iov_base = (void *)0x20000; 2094 iov[1].iov_len = 256; 2095 2096 /* iov[0] is split into 512 and 256 bytes. 2097 * 256 is less than the block size; the next split round 2098 * finds that the first child IO is smaller than 2099 * the block size, so the request fails. 2100 */ 2101 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2102 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2103 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2104 2105 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2106 CU_ASSERT(rc == 0); 2107 CU_ASSERT(g_io_done == false); 2108 2109 /* First child IO is OK */ 2110 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2111 stub_complete_io(1); 2112 CU_ASSERT(g_io_done == true); 2113 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2114 2115 /* error exit */ 2116 stub_complete_io(1); 2117 CU_ASSERT(g_io_done == true); 2118 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2119 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2120 2121 /* Test a multi-vector command that needs to be split by segment size and then needs 2122 * to be split further due to the capacity of child iovs. 2123 * 2124 * In this case, the last two iovs need to be split, but that would exceed the capacity 2125 * of child iovs, so the remainder must wait until the first batch completes.
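 * In numbers: the first BDEV_IO_NUM_CHILD_IOV - 2 iovs map 1:1 to child entries; the next iov (512 * 2) splits into two entries and fills the child iov array, so the last iov's two entries can only go out in the second round.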
2126 */ 2127 bdev->max_segment_size = 512; 2128 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2129 g_io_done = false; 2130 2131 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2132 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2133 iov[i].iov_len = 512; 2134 } 2135 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2136 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2137 iov[i].iov_len = 512 * 2; 2138 } 2139 2140 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2141 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 2142 /* iovs 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */ 2143 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2144 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2145 } 2146 /* iov (BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2147 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2148 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2149 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2150 2151 /* The child iov entries would exceed the parent IO's capacity, so the remainder is split in the next round */ 2152 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2); 2153 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2154 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2155 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2156 2157 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2158 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2159 CU_ASSERT(rc == 0); 2160 CU_ASSERT(g_io_done == false); 2161 2162 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2163 stub_complete_io(1); 2164 CU_ASSERT(g_io_done == false); 2165 2166 /* Next round */ 2167 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2168 stub_complete_io(1); 2169 CU_ASSERT(g_io_done == true); 2170 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2171 2172 /* This case is similar to the previous one, but the last few child iov 2173 * entries do not add up to a full block length, so they 2174 * cannot be put into this child IO and must wait for the next round. 2175 */ 2176 bdev->max_segment_size = 512; 2177 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2178 g_io_done = false; 2179 2180 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2181 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2182 iov[i].iov_len = 512; 2183 } 2184 2185 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2186 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2187 iov[i].iov_len = 128; 2188 } 2189 2190 /* The first child's iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2, 2191 * because the remaining 2 iovs are not enough for a full block. 2192 */ 2193 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2194 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2); 2195 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2196 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2197 } 2198 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2199 2200 /* The second child IO waits until the first child IO completes before executing, 2201 * because together the iovcnts of the two IOs exceed the parent IO's child iov capacity.
2202 * (iovs BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2) 2203 */ 2204 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2, 2205 1, 4); 2206 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2207 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2208 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2209 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2210 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2211 2212 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2213 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2214 CU_ASSERT(rc == 0); 2215 CU_ASSERT(g_io_done == false); 2216 2217 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2218 stub_complete_io(1); 2219 CU_ASSERT(g_io_done == false); 2220 2221 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2222 stub_complete_io(1); 2223 CU_ASSERT(g_io_done == true); 2224 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2225 2226 /* A very complicated case. Each sg entry exceeds max_segment_size and 2227 * needs to be split. In addition, each child IO must be a multiple of the block 2228 * length, and the total child iovcnt exceeds the parent's capacity. 2229 */ 2230 bdev->max_segment_size = 512 + 128; 2231 bdev->max_num_segments = 3; 2232 g_io_done = false; 2233 2234 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2235 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2236 iov[i].iov_len = 512 + 256; 2237 } 2238 2239 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2240 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2241 iov[i].iov_len = 512 + 128; 2242 } 2243 2244 /* The child IOs occupy 9 child iov entries per for() round, 3 * 9 = 27 entries in 2245 * total. Each for() round consumes 4 parent IO iov entries and 6 blocks. 2246 * The loop generates 9 child IOs. 2247 */ 2248 for (i = 0; i < 3; i++) { 2249 uint32_t j = i * 4; 2250 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2251 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2252 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2253 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2254 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2255 2256 /* Each child IO must be a multiple of the block length, so 2257 * iov[j + 2] must be split. If a third entry were added, 2258 * the block-length multiple could not be guaranteed, but the trimmed 2259 * entry still occupies one entry of the parent's child iov array.
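 * Here 512 + 512 = 1024 bytes, exactly 2 blocks; adding iov[j + 2]'s remaining 256 bytes would make 1280 bytes, which is not a block multiple.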
2260 */ 2261 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2262 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2263 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2264 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2265 2266 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2267 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2268 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2269 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2270 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2271 } 2272 2273 /* The 10th child IO starts at child iov position 27; 2274 * its parent iov entry index is 3 * 4 and its block offset is 3 * 6. 2275 */ 2276 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2277 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2278 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2279 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2280 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2281 2282 /* The 11th child IO starts at child iov position 30 */ 2283 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2284 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2285 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2286 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2287 2288 /* The 2nd split round starts with iovpos 0; this is the 12th child IO */ 2289 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2290 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2291 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2292 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2293 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2294 2295 /* Generate 9 more child IOs consuming 27 child iov entries. 2296 * Each for() round again consumes 4 parent IO iov entries and 6 blocks. 2297 * The parent IO iov index starts from 16 and the block offset starts from 24. 2298 */ 2299 for (i = 0; i < 3; i++) { 2300 uint32_t j = i * 4 + 16; 2301 uint32_t offset = i * 6 + 24; 2302 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2303 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2304 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2305 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2306 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2307 2308 /* Each child IO must be a multiple of the block length, so 2309 * iov[j + 2] must be split. If a third entry were added, 2310 * the block-length multiple could not be guaranteed, but the trimmed 2311 * entry still occupies one entry of the parent's child iov array.
2312 */ 2313 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2314 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2315 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2316 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2317 2318 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2319 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2320 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2321 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2322 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2323 } 2324 2325 /* The 22nd child IO, at child iov position 30 */ 2326 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2327 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2328 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2329 2330 /* The third round */ 2331 /* Here is the 23rd child IO, and child iovpos is 0 */ 2332 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2333 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2334 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2335 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2336 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2337 2338 /* The 24th child IO */ 2339 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2340 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2341 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2342 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2343 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2344 2345 /* The 25th child IO */ 2346 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2347 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2348 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2349 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2350 2351 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2352 50, io_done, NULL); 2353 CU_ASSERT(rc == 0); 2354 CU_ASSERT(g_io_done == false); 2355 2356 /* The parent IO supports up to 32 child iovs, so at most 2357 * 11 child IOs can be split at a time; the 2358 * splitting continues after the first batch completes. 2359 */ 2360 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2361 stub_complete_io(11); 2362 CU_ASSERT(g_io_done == false); 2363 2364 /* The 2nd round */ 2365 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2366 stub_complete_io(11); 2367 CU_ASSERT(g_io_done == false); 2368 2369 /* The last round */ 2370 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2371 stub_complete_io(3); 2372 CU_ASSERT(g_io_done == true); 2373 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2374 2375 /* Test a WRITE_ZEROES. This should also not be split.
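 * WRITE_ZEROES carries no data iovs, so max_segment_size and max_num_segments do not apply and the 36-block request is expected to stay whole.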
*/ 2376 bdev->max_segment_size = 512; 2377 bdev->max_num_segments = 1; 2378 g_io_done = false; 2379 2380 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2381 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2382 2383 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2384 CU_ASSERT(rc == 0); 2385 CU_ASSERT(g_io_done == false); 2386 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2387 stub_complete_io(1); 2388 CU_ASSERT(g_io_done == true); 2389 2390 /* Test an UNMAP. This should also not be split. */ 2391 g_io_done = false; 2392 2393 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2394 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2395 2396 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2397 CU_ASSERT(rc == 0); 2398 CU_ASSERT(g_io_done == false); 2399 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2400 stub_complete_io(1); 2401 CU_ASSERT(g_io_done == true); 2402 2403 /* Test a FLUSH. This should also not be split. */ 2404 g_io_done = false; 2405 2406 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2407 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2408 2409 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL); 2410 CU_ASSERT(rc == 0); 2411 CU_ASSERT(g_io_done == false); 2412 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2413 stub_complete_io(1); 2414 CU_ASSERT(g_io_done == true); 2415 2416 spdk_put_io_channel(io_ch); 2417 spdk_bdev_close(desc); 2418 free_bdev(bdev); 2419 spdk_bdev_finish(bdev_fini_cb, NULL); 2420 poll_threads(); 2421 } 2422 2423 static void 2424 bdev_io_mix_split_test(void) 2425 { 2426 struct spdk_bdev *bdev; 2427 struct spdk_bdev_desc *desc = NULL; 2428 struct spdk_io_channel *io_ch; 2429 struct spdk_bdev_opts bdev_opts = {}; 2430 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 2431 struct ut_expected_io *expected_io; 2432 uint64_t i; 2433 int rc; 2434 2435 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2436 bdev_opts.bdev_io_pool_size = 512; 2437 bdev_opts.bdev_io_cache_size = 64; 2438 2439 rc = spdk_bdev_set_opts(&bdev_opts); 2440 CU_ASSERT(rc == 0); 2441 spdk_bdev_initialize(bdev_init_cb, NULL); 2442 2443 bdev = allocate_bdev("bdev0"); 2444 2445 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2446 CU_ASSERT(rc == 0); 2447 SPDK_CU_ASSERT_FATAL(desc != NULL); 2448 io_ch = spdk_bdev_get_io_channel(desc); 2449 CU_ASSERT(io_ch != NULL); 2450 2451 /* First case: optimal_io_boundary == max_segment_size * max_num_segments */ 2452 bdev->split_on_optimal_io_boundary = true; 2453 bdev->optimal_io_boundary = 16; 2454 2455 bdev->max_segment_size = 512; 2456 bdev->max_num_segments = 16; 2457 g_io_done = false; 2458 2459 /* An IO crossing the IO boundary requires a split. 2460 * Total: 2 child IOs.
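 * Blocks 14-15 fall before the boundary at block 16 and blocks 16-17 after it, and each 512-byte block becomes its own segment entry.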
2461 */ 2462 2463 /* The 1st child IO split the segment_size to multiple segment entry */ 2464 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2465 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2466 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2467 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2468 2469 /* The 2nd child IO split the segment_size to multiple segment entry */ 2470 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2471 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2472 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2473 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2474 2475 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2476 CU_ASSERT(rc == 0); 2477 CU_ASSERT(g_io_done == false); 2478 2479 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2480 stub_complete_io(2); 2481 CU_ASSERT(g_io_done == true); 2482 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2483 2484 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2485 bdev->max_segment_size = 15 * 512; 2486 bdev->max_num_segments = 1; 2487 g_io_done = false; 2488 2489 /* IO crossing the IO boundary requires split. 2490 * The 1st child IO segment size exceeds the max_segment_size, 2491 * So 1st child IO will be splitted to multiple segment entry. 2492 * Then it split to 2 child IOs because of the max_num_segments. 2493 * Total 3 child IOs. 2494 */ 2495 2496 /* The first 2 IOs are in an IO boundary. 2497 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2498 * So it split to the first 2 IOs. 2499 */ 2500 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2501 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2502 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2503 2504 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2505 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2506 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2507 2508 /* The 3rd Child IO is because of the io boundary */ 2509 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2510 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2511 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2512 2513 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2514 CU_ASSERT(rc == 0); 2515 CU_ASSERT(g_io_done == false); 2516 2517 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2518 stub_complete_io(3); 2519 CU_ASSERT(g_io_done == true); 2520 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2521 2522 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2523 bdev->max_segment_size = 17 * 512; 2524 bdev->max_num_segments = 1; 2525 g_io_done = false; 2526 2527 /* IO crossing the IO boundary requires split. 2528 * Child IO does not split. 2529 * Total 2 child IOs. 
2530 */ 2531 2532 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2533 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2534 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2535 2536 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2537 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2538 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2539 2540 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2541 CU_ASSERT(rc == 0); 2542 CU_ASSERT(g_io_done == false); 2543 2544 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2545 stub_complete_io(2); 2546 CU_ASSERT(g_io_done == true); 2547 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2548 2549 /* Now set up a more complex, multi-vector command that needs to be split, 2550 * including splitting iovecs. 2551 * optimal_io_boundary < max_segment_size * max_num_segments 2552 */ 2553 bdev->max_segment_size = 3 * 512; 2554 bdev->max_num_segments = 6; 2555 g_io_done = false; 2556 2557 iov[0].iov_base = (void *)0x10000; 2558 iov[0].iov_len = 4 * 512; 2559 iov[1].iov_base = (void *)0x20000; 2560 iov[1].iov_len = 4 * 512; 2561 iov[2].iov_base = (void *)0x30000; 2562 iov[2].iov_len = 10 * 512; 2563 2564 /* IO crossing the IO boundary requires split. 2565 * The 1st child IO segment size exceeds the max_segment_size and after 2566 * splitting segment_size, the num_segments exceeds max_num_segments. 2567 * So 1st child IO will be splitted to 2 child IOs. 2568 * Total 3 child IOs. 2569 */ 2570 2571 /* The first 2 IOs are in an IO boundary. 2572 * After splitting segment size the segment num exceeds. 2573 * So it splits to 2 child IOs. 2574 */ 2575 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2576 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2577 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2578 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2579 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2580 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2581 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2582 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2583 2584 /* The 2nd child IO has the left segment entry */ 2585 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2586 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2587 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2588 2589 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2590 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2591 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2592 2593 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2594 CU_ASSERT(rc == 0); 2595 CU_ASSERT(g_io_done == false); 2596 2597 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2598 stub_complete_io(3); 2599 CU_ASSERT(g_io_done == true); 2600 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2601 2602 /* A very complicated case. Each sg entry exceeds max_segment_size 2603 * and split on io boundary. 
2604 * optimal_io_boundary < max_segment_size * max_num_segments 2605 */ 2606 bdev->max_segment_size = 3 * 512; 2607 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2608 g_io_done = false; 2609 2610 for (i = 0; i < 20; i++) { 2611 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2612 iov[i].iov_len = 512 * 4; 2613 } 2614 2615 /* An IO crossing the IO boundary requires a split. 2616 * The 80-block length splits into 5 child IOs based on the offset and the IO boundary. 2617 * Each iov entry needs to be split into 2 entries because of max_segment_size. 2618 * Total: 5 child IOs. 2619 */ 2620 2621 /* 4 iov entries fit in one IO boundary and each iov entry splits in 2, 2622 * so each child IO occupies 8 child iov entries. 2623 */ 2624 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2625 for (i = 0; i < 4; i++) { 2626 int iovcnt = i * 2; 2627 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2628 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2629 } 2630 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2631 2632 /* The 2nd child IO; 16 child iov entries of the parent IO consumed in total */ 2633 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2634 for (i = 4; i < 8; i++) { 2635 int iovcnt = (i - 4) * 2; 2636 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2637 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2638 } 2639 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2640 2641 /* The 3rd child IO; 24 child iov entries of the parent IO consumed in total */ 2642 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2643 for (i = 8; i < 12; i++) { 2644 int iovcnt = (i - 8) * 2; 2645 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2646 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2647 } 2648 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2649 2650 /* The 4th child IO; all 32 child iov entries of the parent IO consumed */ 2651 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2652 for (i = 12; i < 16; i++) { 2653 int iovcnt = (i - 12) * 2; 2654 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2655 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2656 } 2657 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2658 2659 /* The 5th child IO: the parent's child iov entries are exhausted, so it is split 2660 * in the next round.
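 * The first four children consume 4 * 8 = 32 child iov entries, exhausting the parent's capacity, which is why only 4 IOs are outstanding in the first split round.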
2661 */ 2662 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2663 for (i = 16; i < 20; i++) { 2664 int iovcnt = (i - 16) * 2; 2665 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2666 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2667 } 2668 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2669 2670 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2671 CU_ASSERT(rc == 0); 2672 CU_ASSERT(g_io_done == false); 2673 2674 /* First split round */ 2675 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2676 stub_complete_io(4); 2677 CU_ASSERT(g_io_done == false); 2678 2679 /* Second split round */ 2680 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2681 stub_complete_io(1); 2682 CU_ASSERT(g_io_done == true); 2683 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2684 2685 spdk_put_io_channel(io_ch); 2686 spdk_bdev_close(desc); 2687 free_bdev(bdev); 2688 spdk_bdev_finish(bdev_fini_cb, NULL); 2689 poll_threads(); 2690 } 2691 2692 static void 2693 bdev_io_split_with_io_wait(void) 2694 { 2695 struct spdk_bdev *bdev; 2696 struct spdk_bdev_desc *desc = NULL; 2697 struct spdk_io_channel *io_ch; 2698 struct spdk_bdev_channel *channel; 2699 struct spdk_bdev_mgmt_channel *mgmt_ch; 2700 struct spdk_bdev_opts bdev_opts = {}; 2701 struct iovec iov[3]; 2702 struct ut_expected_io *expected_io; 2703 int rc; 2704 2705 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2706 bdev_opts.bdev_io_pool_size = 2; 2707 bdev_opts.bdev_io_cache_size = 1; 2708 2709 rc = spdk_bdev_set_opts(&bdev_opts); 2710 CU_ASSERT(rc == 0); 2711 spdk_bdev_initialize(bdev_init_cb, NULL); 2712 2713 bdev = allocate_bdev("bdev0"); 2714 2715 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2716 CU_ASSERT(rc == 0); 2717 CU_ASSERT(desc != NULL); 2718 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2719 io_ch = spdk_bdev_get_io_channel(desc); 2720 CU_ASSERT(io_ch != NULL); 2721 channel = spdk_io_channel_get_ctx(io_ch); 2722 mgmt_ch = channel->shared_resource->mgmt_ch; 2723 2724 bdev->optimal_io_boundary = 16; 2725 bdev->split_on_optimal_io_boundary = true; 2726 2727 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2728 CU_ASSERT(rc == 0); 2729 2730 /* Now test that a single-vector command is split correctly. 2731 * Offset 14, length 8, payload 0xF000 2732 * Child - Offset 14, length 2, payload 0xF000 2733 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2734 * 2735 * Set up the expected values before calling spdk_bdev_read_blocks 2736 */ 2737 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2738 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2739 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2740 2741 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2742 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2743 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2744 2745 /* The following children will be submitted sequentially due to the capacity of 2746 * spdk_bdev_io. 
2747 */ 2748 2749 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2750 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2751 CU_ASSERT(rc == 0); 2752 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2753 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2754 2755 /* Completing the first read I/O will submit the first child */ 2756 stub_complete_io(1); 2757 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2758 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2759 2760 /* Completing the first child will submit the second child */ 2761 stub_complete_io(1); 2762 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2763 2764 /* Complete the second child I/O. This should result in our callback getting 2765 * invoked since the parent I/O is now complete. 2766 */ 2767 stub_complete_io(1); 2768 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2769 2770 /* Now set up a more complex, multi-vector command that needs to be split, 2771 * including splitting iovecs. 2772 */ 2773 iov[0].iov_base = (void *)0x10000; 2774 iov[0].iov_len = 512; 2775 iov[1].iov_base = (void *)0x20000; 2776 iov[1].iov_len = 20 * 512; 2777 iov[2].iov_base = (void *)0x30000; 2778 iov[2].iov_len = 11 * 512; 2779 2780 g_io_done = false; 2781 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2782 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2783 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2784 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2785 2786 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2787 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2788 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2789 2790 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2791 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2792 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2793 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2794 2795 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2796 CU_ASSERT(rc == 0); 2797 CU_ASSERT(g_io_done == false); 2798 2799 /* The following children will be submitted sequentially due to the capacity of 2800 * spdk_bdev_io. 2801 */ 2802 2803 /* Completing the first child will submit the second child */ 2804 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2805 stub_complete_io(1); 2806 CU_ASSERT(g_io_done == false); 2807 2808 /* Completing the second child will submit the third child */ 2809 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2810 stub_complete_io(1); 2811 CU_ASSERT(g_io_done == false); 2812 2813 /* Completing the third child will result in our callback getting invoked 2814 * since the parent I/O is now complete. 
2815 */ 2816 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2817 stub_complete_io(1); 2818 CU_ASSERT(g_io_done == true); 2819 2820 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2821 2822 spdk_put_io_channel(io_ch); 2823 spdk_bdev_close(desc); 2824 free_bdev(bdev); 2825 spdk_bdev_finish(bdev_fini_cb, NULL); 2826 poll_threads(); 2827 } 2828 2829 static void 2830 bdev_io_alignment(void) 2831 { 2832 struct spdk_bdev *bdev; 2833 struct spdk_bdev_desc *desc = NULL; 2834 struct spdk_io_channel *io_ch; 2835 struct spdk_bdev_opts bdev_opts = {}; 2836 int rc; 2837 void *buf = NULL; 2838 struct iovec iovs[2]; 2839 int iovcnt; 2840 uint64_t alignment; 2841 2842 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2843 bdev_opts.bdev_io_pool_size = 20; 2844 bdev_opts.bdev_io_cache_size = 2; 2845 2846 rc = spdk_bdev_set_opts(&bdev_opts); 2847 CU_ASSERT(rc == 0); 2848 spdk_bdev_initialize(bdev_init_cb, NULL); 2849 2850 fn_table.submit_request = stub_submit_request_get_buf; 2851 bdev = allocate_bdev("bdev0"); 2852 2853 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2854 CU_ASSERT(rc == 0); 2855 CU_ASSERT(desc != NULL); 2856 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2857 io_ch = spdk_bdev_get_io_channel(desc); 2858 CU_ASSERT(io_ch != NULL); 2859 2860 /* Create aligned buffer */ 2861 rc = posix_memalign(&buf, 4096, 8192); 2862 SPDK_CU_ASSERT_FATAL(rc == 0); 2863 2864 /* Pass aligned single buffer with no alignment required */ 2865 alignment = 1; 2866 bdev->required_alignment = spdk_u32log2(alignment); 2867 2868 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2869 CU_ASSERT(rc == 0); 2870 stub_complete_io(1); 2871 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2872 alignment)); 2873 2874 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 2875 CU_ASSERT(rc == 0); 2876 stub_complete_io(1); 2877 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2878 alignment)); 2879 2880 /* Pass unaligned single buffer with no alignment required */ 2881 alignment = 1; 2882 bdev->required_alignment = spdk_u32log2(alignment); 2883 2884 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2885 CU_ASSERT(rc == 0); 2886 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2887 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2888 stub_complete_io(1); 2889 2890 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2891 CU_ASSERT(rc == 0); 2892 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2893 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 2894 stub_complete_io(1); 2895 2896 /* Pass unaligned single buffer with 512 alignment required */ 2897 alignment = 512; 2898 bdev->required_alignment = spdk_u32log2(alignment); 2899 2900 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2901 CU_ASSERT(rc == 0); 2902 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2903 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2904 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2905 alignment)); 2906 stub_complete_io(1); 2907 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2908 2909 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 2910 CU_ASSERT(rc == 0); 2911 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2912 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2913 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 
2914 alignment)); 2915 stub_complete_io(1); 2916 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2917 2918 /* Pass unaligned single buffer with 4096 alignment required */ 2919 alignment = 4096; 2920 bdev->required_alignment = spdk_u32log2(alignment); 2921 2922 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2923 CU_ASSERT(rc == 0); 2924 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2925 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2926 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2927 alignment)); 2928 stub_complete_io(1); 2929 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2930 2931 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 2932 CU_ASSERT(rc == 0); 2933 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 2934 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2935 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2936 alignment)); 2937 stub_complete_io(1); 2938 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2939 2940 /* Pass aligned iovs with no alignment required */ 2941 alignment = 1; 2942 bdev->required_alignment = spdk_u32log2(alignment); 2943 2944 iovcnt = 1; 2945 iovs[0].iov_base = buf; 2946 iovs[0].iov_len = 512; 2947 2948 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2949 CU_ASSERT(rc == 0); 2950 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2951 stub_complete_io(1); 2952 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2953 2954 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2955 CU_ASSERT(rc == 0); 2956 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2957 stub_complete_io(1); 2958 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2959 2960 /* Pass unaligned iovs with no alignment required */ 2961 alignment = 1; 2962 bdev->required_alignment = spdk_u32log2(alignment); 2963 2964 iovcnt = 2; 2965 iovs[0].iov_base = buf + 16; 2966 iovs[0].iov_len = 256; 2967 iovs[1].iov_base = buf + 16 + 256 + 32; 2968 iovs[1].iov_len = 256; 2969 2970 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2971 CU_ASSERT(rc == 0); 2972 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2973 stub_complete_io(1); 2974 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2975 2976 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2977 CU_ASSERT(rc == 0); 2978 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 2979 stub_complete_io(1); 2980 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 2981 2982 /* Pass unaligned iov with 2048 alignment required */ 2983 alignment = 2048; 2984 bdev->required_alignment = spdk_u32log2(alignment); 2985 2986 iovcnt = 2; 2987 iovs[0].iov_base = buf + 16; 2988 iovs[0].iov_len = 256; 2989 iovs[1].iov_base = buf + 16 + 256 + 32; 2990 iovs[1].iov_len = 256; 2991 2992 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 2993 CU_ASSERT(rc == 0); 2994 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 2995 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 2996 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 2997 alignment)); 2998 stub_complete_io(1); 2999 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3000 3001 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3002 CU_ASSERT(rc == 0); 3003 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3004 
CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3005 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3006 alignment)); 3007 stub_complete_io(1); 3008 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3009 3010 /* Pass iov without allocated buffer with no alignment required */ 3011 alignment = 1; 3012 bdev->required_alignment = spdk_u32log2(alignment); 3013 3014 iovcnt = 1; 3015 iovs[0].iov_base = NULL; 3016 iovs[0].iov_len = 0; 3017 3018 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3019 CU_ASSERT(rc == 0); 3020 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3021 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3022 alignment)); 3023 stub_complete_io(1); 3024 3025 /* Pass iov without allocated buffer with 1024 alignment required */ 3026 alignment = 1024; 3027 bdev->required_alignment = spdk_u32log2(alignment); 3028 3029 iovcnt = 1; 3030 iovs[0].iov_base = NULL; 3031 iovs[0].iov_len = 0; 3032 3033 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3034 CU_ASSERT(rc == 0); 3035 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3036 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3037 alignment)); 3038 stub_complete_io(1); 3039 3040 spdk_put_io_channel(io_ch); 3041 spdk_bdev_close(desc); 3042 free_bdev(bdev); 3043 fn_table.submit_request = stub_submit_request; 3044 spdk_bdev_finish(bdev_fini_cb, NULL); 3045 poll_threads(); 3046 3047 free(buf); 3048 } 3049 3050 static void 3051 bdev_io_alignment_with_boundary(void) 3052 { 3053 struct spdk_bdev *bdev; 3054 struct spdk_bdev_desc *desc = NULL; 3055 struct spdk_io_channel *io_ch; 3056 struct spdk_bdev_opts bdev_opts = {}; 3057 int rc; 3058 void *buf = NULL; 3059 struct iovec iovs[2]; 3060 int iovcnt; 3061 uint64_t alignment; 3062 3063 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3064 bdev_opts.bdev_io_pool_size = 20; 3065 bdev_opts.bdev_io_cache_size = 2; 3066 3067 bdev_opts.opts_size = sizeof(bdev_opts); 3068 rc = spdk_bdev_set_opts(&bdev_opts); 3069 CU_ASSERT(rc == 0); 3070 spdk_bdev_initialize(bdev_init_cb, NULL); 3071 3072 fn_table.submit_request = stub_submit_request_get_buf; 3073 bdev = allocate_bdev("bdev0"); 3074 3075 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3076 CU_ASSERT(rc == 0); 3077 CU_ASSERT(desc != NULL); 3078 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3079 io_ch = spdk_bdev_get_io_channel(desc); 3080 CU_ASSERT(io_ch != NULL); 3081 3082 /* Create aligned buffer */ 3083 rc = posix_memalign(&buf, 4096, 131072); 3084 SPDK_CU_ASSERT_FATAL(rc == 0); 3085 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3086 3087 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3088 alignment = 512; 3089 bdev->required_alignment = spdk_u32log2(alignment); 3090 bdev->optimal_io_boundary = 2; 3091 bdev->split_on_optimal_io_boundary = true; 3092 3093 iovcnt = 1; 3094 iovs[0].iov_base = NULL; 3095 iovs[0].iov_len = 512 * 3; 3096 3097 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3098 CU_ASSERT(rc == 0); 3099 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3100 stub_complete_io(2); 3101 3102 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3103 alignment = 512; 3104 bdev->required_alignment = spdk_u32log2(alignment); 3105 bdev->optimal_io_boundary = 16; 3106 bdev->split_on_optimal_io_boundary = true; 3107 3108 iovcnt = 1; 3109 iovs[0].iov_base = NULL; 3110 iovs[0].iov_len
= 512 * 16; 3111 3112 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3113 CU_ASSERT(rc == 0); 3114 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3115 stub_complete_io(2); 3116 3117 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */ 3118 alignment = 512; 3119 bdev->required_alignment = spdk_u32log2(alignment); 3120 bdev->optimal_io_boundary = 128; 3121 bdev->split_on_optimal_io_boundary = true; 3122 3123 iovcnt = 1; 3124 iovs[0].iov_base = buf + 16; 3125 iovs[0].iov_len = 512 * 160; 3126 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3127 CU_ASSERT(rc == 0); 3128 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3129 stub_complete_io(2); 3130 3131 /* 512 * 3 with 2 IO boundary */ 3132 alignment = 512; 3133 bdev->required_alignment = spdk_u32log2(alignment); 3134 bdev->optimal_io_boundary = 2; 3135 bdev->split_on_optimal_io_boundary = true; 3136 3137 iovcnt = 2; 3138 iovs[0].iov_base = buf + 16; 3139 iovs[0].iov_len = 512; 3140 iovs[1].iov_base = buf + 16 + 512 + 32; 3141 iovs[1].iov_len = 1024; 3142 3143 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3144 CU_ASSERT(rc == 0); 3145 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3146 stub_complete_io(2); 3147 3148 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3149 CU_ASSERT(rc == 0); 3150 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3151 stub_complete_io(2); 3152 3153 /* 512 * 64 with 32 IO boundary */ 3154 bdev->optimal_io_boundary = 32; 3155 iovcnt = 2; 3156 iovs[0].iov_base = buf + 16; 3157 iovs[0].iov_len = 16384; 3158 iovs[1].iov_base = buf + 16 + 16384 + 32; 3159 iovs[1].iov_len = 16384; 3160 3161 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3162 CU_ASSERT(rc == 0); 3163 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3164 stub_complete_io(3); 3165 3166 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3167 CU_ASSERT(rc == 0); 3168 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3169 stub_complete_io(3); 3170 3171 /* 512 * 160 with 32 IO boundary */ 3172 iovcnt = 1; 3173 iovs[0].iov_base = buf + 16; 3174 iovs[0].iov_len = 16384 + 65536; 3175 3176 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3177 CU_ASSERT(rc == 0); 3178 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3179 stub_complete_io(6); 3180 3181 spdk_put_io_channel(io_ch); 3182 spdk_bdev_close(desc); 3183 free_bdev(bdev); 3184 fn_table.submit_request = stub_submit_request; 3185 spdk_bdev_finish(bdev_fini_cb, NULL); 3186 poll_threads(); 3187 3188 free(buf); 3189 } 3190 3191 static void 3192 histogram_status_cb(void *cb_arg, int status) 3193 { 3194 g_status = status; 3195 } 3196 3197 static void 3198 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3199 { 3200 g_status = status; 3201 g_histogram = histogram; 3202 } 3203 3204 static void 3205 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3206 uint64_t total, uint64_t so_far) 3207 { 3208 g_count += count; 3209 } 3210 3211 static void 3212 bdev_histograms(void) 3213 { 3214 struct spdk_bdev *bdev; 3215 struct spdk_bdev_desc *desc = NULL; 3216 struct spdk_io_channel *ch; 3217 struct spdk_histogram_data *histogram; 3218 uint8_t buf[4096]; 3219 int rc; 3220 3221 spdk_bdev_initialize(bdev_init_cb, NULL); 3222 3223 bdev = allocate_bdev("bdev"); 3224 3225 rc =
spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3226 CU_ASSERT(rc == 0); 3227 CU_ASSERT(desc != NULL); 3228 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3229 3230 ch = spdk_bdev_get_io_channel(desc); 3231 CU_ASSERT(ch != NULL); 3232 3233 /* Enable histogram */ 3234 g_status = -1; 3235 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3236 poll_threads(); 3237 CU_ASSERT(g_status == 0); 3238 CU_ASSERT(bdev->internal.histogram_enabled == true); 3239 3240 /* Allocate histogram */ 3241 histogram = spdk_histogram_data_alloc(); 3242 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3243 3244 /* Check if histogram is zeroed */ 3245 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3246 poll_threads(); 3247 CU_ASSERT(g_status == 0); 3248 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3249 3250 g_count = 0; 3251 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3252 3253 CU_ASSERT(g_count == 0); 3254 3255 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3256 CU_ASSERT(rc == 0); 3257 3258 spdk_delay_us(10); 3259 stub_complete_io(1); 3260 poll_threads(); 3261 3262 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3263 CU_ASSERT(rc == 0); 3264 3265 spdk_delay_us(10); 3266 stub_complete_io(1); 3267 poll_threads(); 3268 3269 /* Check if histogram gathered data from all I/O channels */ 3270 g_histogram = NULL; 3271 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3272 poll_threads(); 3273 CU_ASSERT(g_status == 0); 3274 CU_ASSERT(bdev->internal.histogram_enabled == true); 3275 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3276 3277 g_count = 0; 3278 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3279 CU_ASSERT(g_count == 2); 3280 3281 /* Disable histogram */ 3282 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3283 poll_threads(); 3284 CU_ASSERT(g_status == 0); 3285 CU_ASSERT(bdev->internal.histogram_enabled == false); 3286 3287 /* Try to run histogram commands on disabled bdev */ 3288 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3289 poll_threads(); 3290 CU_ASSERT(g_status == -EFAULT); 3291 3292 spdk_histogram_data_free(histogram); 3293 spdk_put_io_channel(ch); 3294 spdk_bdev_close(desc); 3295 free_bdev(bdev); 3296 spdk_bdev_finish(bdev_fini_cb, NULL); 3297 poll_threads(); 3298 } 3299 3300 static void 3301 _bdev_compare(bool emulated) 3302 { 3303 struct spdk_bdev *bdev; 3304 struct spdk_bdev_desc *desc = NULL; 3305 struct spdk_io_channel *ioch; 3306 struct ut_expected_io *expected_io; 3307 uint64_t offset, num_blocks; 3308 uint32_t num_completed; 3309 char aa_buf[512]; 3310 char bb_buf[512]; 3311 struct iovec compare_iov; 3312 uint8_t expected_io_type; 3313 int rc; 3314 3315 if (emulated) { 3316 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3317 } else { 3318 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3319 } 3320 3321 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3322 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3323 3324 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3325 3326 spdk_bdev_initialize(bdev_init_cb, NULL); 3327 fn_table.submit_request = stub_submit_request_get_buf; 3328 bdev = allocate_bdev("bdev"); 3329 3330 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3331 CU_ASSERT_EQUAL(rc, 0); 3332 SPDK_CU_ASSERT_FATAL(desc != NULL); 3333 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3334 ioch = spdk_bdev_get_io_channel(desc); 3335 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3336 3337 fn_table.submit_request = 
stub_submit_request_get_buf; 3338 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3339 3340 offset = 50; 3341 num_blocks = 1; 3342 compare_iov.iov_base = aa_buf; 3343 compare_iov.iov_len = sizeof(aa_buf); 3344 3345 /* 1. successful compare */ 3346 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3347 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3348 3349 g_io_done = false; 3350 g_compare_read_buf = aa_buf; 3351 g_compare_read_buf_len = sizeof(aa_buf); 3352 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3353 CU_ASSERT_EQUAL(rc, 0); 3354 num_completed = stub_complete_io(1); 3355 CU_ASSERT_EQUAL(num_completed, 1); 3356 CU_ASSERT(g_io_done == true); 3357 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3358 3359 /* 2. miscompare */ 3360 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3361 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3362 3363 g_io_done = false; 3364 g_compare_read_buf = bb_buf; 3365 g_compare_read_buf_len = sizeof(bb_buf); 3366 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3367 CU_ASSERT_EQUAL(rc, 0); 3368 num_completed = stub_complete_io(1); 3369 CU_ASSERT_EQUAL(num_completed, 1); 3370 CU_ASSERT(g_io_done == true); 3371 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3372 3373 spdk_put_io_channel(ioch); 3374 spdk_bdev_close(desc); 3375 free_bdev(bdev); 3376 fn_table.submit_request = stub_submit_request; 3377 spdk_bdev_finish(bdev_fini_cb, NULL); 3378 poll_threads(); 3379 3380 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3381 3382 g_compare_read_buf = NULL; 3383 } 3384 3385 static void 3386 _bdev_compare_with_md(bool emulated) 3387 { 3388 struct spdk_bdev *bdev; 3389 struct spdk_bdev_desc *desc = NULL; 3390 struct spdk_io_channel *ioch; 3391 struct ut_expected_io *expected_io; 3392 uint64_t offset, num_blocks; 3393 uint32_t num_completed; 3394 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3395 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3396 char buf_miscompare[1024 /* 2 * blocklen */]; 3397 char md_buf[16]; 3398 char md_buf_miscompare[16]; 3399 struct iovec compare_iov; 3400 uint8_t expected_io_type; 3401 int rc; 3402 3403 if (emulated) { 3404 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3405 } else { 3406 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3407 } 3408 3409 memset(buf, 0xaa, sizeof(buf)); 3410 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3411 /* make last md different */ 3412 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3413 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3414 memset(md_buf, 0xaa, 16); 3415 memset(md_buf_miscompare, 0xbb, 16); 3416 3417 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3418 3419 spdk_bdev_initialize(bdev_init_cb, NULL); 3420 fn_table.submit_request = stub_submit_request_get_buf; 3421 bdev = allocate_bdev("bdev"); 3422 3423 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3424 CU_ASSERT_EQUAL(rc, 0); 3425 SPDK_CU_ASSERT_FATAL(desc != NULL); 3426 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3427 ioch = spdk_bdev_get_io_channel(desc); 3428 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3429 3430 fn_table.submit_request = stub_submit_request_get_buf; 3431 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3432 3433 offset = 50; 3434 num_blocks = 2; 3435 3436 /* interleaved md & data */ 3437 
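/* With interleaved metadata each block carries its metadata inline: 512 bytes of
 * data plus 8 bytes of metadata give a blocklen of 520, so the two-block buffers
 * above span 2 * (512 + 8) = 1040 bytes, matching their declared sizes. */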
bdev->md_interleave = true; 3438 bdev->md_len = 8; 3439 bdev->blocklen = 512 + 8; 3440 compare_iov.iov_base = buf; 3441 compare_iov.iov_len = sizeof(buf); 3442 3443 /* 1. successful compare with md interleaved */ 3444 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3445 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3446 3447 g_io_done = false; 3448 g_compare_read_buf = buf; 3449 g_compare_read_buf_len = sizeof(buf); 3450 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3451 CU_ASSERT_EQUAL(rc, 0); 3452 num_completed = stub_complete_io(1); 3453 CU_ASSERT_EQUAL(num_completed, 1); 3454 CU_ASSERT(g_io_done == true); 3455 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3456 3457 /* 2. miscompare with md interleaved */ 3458 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3459 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3460 3461 g_io_done = false; 3462 g_compare_read_buf = buf_interleaved_miscompare; 3463 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3464 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3465 CU_ASSERT_EQUAL(rc, 0); 3466 num_completed = stub_complete_io(1); 3467 CU_ASSERT_EQUAL(num_completed, 1); 3468 CU_ASSERT(g_io_done == true); 3469 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3470 3471 /* Separate data & md buffers */ 3472 bdev->md_interleave = false; 3473 bdev->blocklen = 512; 3474 compare_iov.iov_base = buf; 3475 compare_iov.iov_len = 1024; 3476 3477 /* 3. successful compare with md separated */ 3478 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3479 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3480 3481 g_io_done = false; 3482 g_compare_read_buf = buf; 3483 g_compare_read_buf_len = 1024; 3484 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3485 g_compare_md_buf = md_buf; 3486 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3487 offset, num_blocks, io_done, NULL); 3488 CU_ASSERT_EQUAL(rc, 0); 3489 num_completed = stub_complete_io(1); 3490 CU_ASSERT_EQUAL(num_completed, 1); 3491 CU_ASSERT(g_io_done == true); 3492 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3493 3494 /* 4. miscompare with md separated where md buf is different */ 3495 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3496 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3497 3498 g_io_done = false; 3499 g_compare_read_buf = buf; 3500 g_compare_read_buf_len = 1024; 3501 g_compare_md_buf = md_buf_miscompare; 3502 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3503 offset, num_blocks, io_done, NULL); 3504 CU_ASSERT_EQUAL(rc, 0); 3505 num_completed = stub_complete_io(1); 3506 CU_ASSERT_EQUAL(num_completed, 1); 3507 CU_ASSERT(g_io_done == true); 3508 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3509 3510 /* 5. 
miscompare with md separated where buf is different */ 3511 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3512 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3513 3514 g_io_done = false; 3515 g_compare_read_buf = buf_miscompare; 3516 g_compare_read_buf_len = sizeof(buf_miscompare); 3517 g_compare_md_buf = md_buf; 3518 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3519 offset, num_blocks, io_done, NULL); 3520 CU_ASSERT_EQUAL(rc, 0); 3521 num_completed = stub_complete_io(1); 3522 CU_ASSERT_EQUAL(num_completed, 1); 3523 CU_ASSERT(g_io_done == true); 3524 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3525 3526 bdev->md_len = 0; 3527 g_compare_md_buf = NULL; 3528 3529 spdk_put_io_channel(ioch); 3530 spdk_bdev_close(desc); 3531 free_bdev(bdev); 3532 fn_table.submit_request = stub_submit_request; 3533 spdk_bdev_finish(bdev_fini_cb, NULL); 3534 poll_threads(); 3535 3536 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3537 3538 g_compare_read_buf = NULL; 3539 } 3540 3541 static void 3542 bdev_compare(void) 3543 { 3544 _bdev_compare(false); 3545 _bdev_compare_with_md(false); 3546 } 3547 3548 static void 3549 bdev_compare_emulated(void) 3550 { 3551 _bdev_compare(true); 3552 _bdev_compare_with_md(true); 3553 } 3554 3555 static void 3556 bdev_compare_and_write(void) 3557 { 3558 struct spdk_bdev *bdev; 3559 struct spdk_bdev_desc *desc = NULL; 3560 struct spdk_io_channel *ioch; 3561 struct ut_expected_io *expected_io; 3562 uint64_t offset, num_blocks; 3563 uint32_t num_completed; 3564 char aa_buf[512]; 3565 char bb_buf[512]; 3566 char cc_buf[512]; 3567 char write_buf[512]; 3568 struct iovec compare_iov; 3569 struct iovec write_iov; 3570 int rc; 3571 3572 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3573 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3574 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3575 3576 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3577 3578 spdk_bdev_initialize(bdev_init_cb, NULL); 3579 fn_table.submit_request = stub_submit_request_get_buf; 3580 bdev = allocate_bdev("bdev"); 3581 3582 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3583 CU_ASSERT_EQUAL(rc, 0); 3584 SPDK_CU_ASSERT_FATAL(desc != NULL); 3585 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3586 ioch = spdk_bdev_get_io_channel(desc); 3587 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3588 3589 fn_table.submit_request = stub_submit_request_get_buf; 3590 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3591 3592 offset = 50; 3593 num_blocks = 1; 3594 compare_iov.iov_base = aa_buf; 3595 compare_iov.iov_len = sizeof(aa_buf); 3596 write_iov.iov_base = bb_buf; 3597 write_iov.iov_len = sizeof(bb_buf); 3598 3599 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3600 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3601 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3602 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3603 3604 g_io_done = false; 3605 g_compare_read_buf = aa_buf; 3606 g_compare_read_buf_len = sizeof(aa_buf); 3607 memset(write_buf, 0, sizeof(write_buf)); 3608 g_compare_write_buf = write_buf; 3609 g_compare_write_buf_len = sizeof(write_buf); 3610 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3611 offset, num_blocks, io_done, NULL); 3612 /* Trigger range locking */ 3613 poll_threads(); 3614 CU_ASSERT_EQUAL(rc, 0); 3615 num_completed = stub_complete_io(1); 3616 
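/* Only the compare phase has completed at this point; the parent compare-and-write
 * I/O stays outstanding until the write phase below completes as well. */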
CU_ASSERT_EQUAL(num_completed, 1); 3617 CU_ASSERT(g_io_done == false); 3618 num_completed = stub_complete_io(1); 3619 /* Trigger range unlocking */ 3620 poll_threads(); 3621 CU_ASSERT_EQUAL(num_completed, 1); 3622 CU_ASSERT(g_io_done == true); 3623 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3624 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3625 3626 /* Test miscompare */ 3627 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3628 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3629 3630 g_io_done = false; 3631 g_compare_read_buf = cc_buf; 3632 g_compare_read_buf_len = sizeof(cc_buf); 3633 memset(write_buf, 0, sizeof(write_buf)); 3634 g_compare_write_buf = write_buf; 3635 g_compare_write_buf_len = sizeof(write_buf); 3636 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3637 offset, num_blocks, io_done, NULL); 3638 /* Trigger range locking */ 3639 poll_threads(); 3640 CU_ASSERT_EQUAL(rc, 0); 3641 num_completed = stub_complete_io(1); 3642 /* Trigger range unlocking earlier because we expect an error here */ 3643 poll_threads(); 3644 CU_ASSERT_EQUAL(num_completed, 1); 3645 CU_ASSERT(g_io_done == true); 3646 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3647 num_completed = stub_complete_io(1); 3648 CU_ASSERT_EQUAL(num_completed, 0); 3649 3650 spdk_put_io_channel(ioch); 3651 spdk_bdev_close(desc); 3652 free_bdev(bdev); 3653 fn_table.submit_request = stub_submit_request; 3654 spdk_bdev_finish(bdev_fini_cb, NULL); 3655 poll_threads(); 3656 3657 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3658 3659 g_compare_read_buf = NULL; 3660 g_compare_write_buf = NULL; 3661 } 3662 3663 static void 3664 bdev_write_zeroes(void) 3665 { 3666 struct spdk_bdev *bdev; 3667 struct spdk_bdev_desc *desc = NULL; 3668 struct spdk_io_channel *ioch; 3669 struct ut_expected_io *expected_io; 3670 uint64_t offset, num_io_blocks, num_blocks; 3671 uint32_t num_completed, num_requests; 3672 int rc; 3673 3674 spdk_bdev_initialize(bdev_init_cb, NULL); 3675 bdev = allocate_bdev("bdev"); 3676 3677 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3678 CU_ASSERT_EQUAL(rc, 0); 3679 SPDK_CU_ASSERT_FATAL(desc != NULL); 3680 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3681 ioch = spdk_bdev_get_io_channel(desc); 3682 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3683 3684 fn_table.submit_request = stub_submit_request; 3685 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3686 3687 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3688 bdev->md_len = 0; 3689 bdev->blocklen = 4096; 3690 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3691 3692 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3693 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3694 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3695 CU_ASSERT_EQUAL(rc, 0); 3696 num_completed = stub_complete_io(1); 3697 CU_ASSERT_EQUAL(num_completed, 1); 3698 3699 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3700 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3701 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3702 num_requests = 2; 3703 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3704 3705 for (offset = 0; offset < num_requests; ++offset) { 3706 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3707 offset * num_io_blocks,
num_io_blocks, 0); 3708 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3709 } 3710 3711 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3712 CU_ASSERT_EQUAL(rc, 0); 3713 num_completed = stub_complete_io(num_requests); 3714 CU_ASSERT_EQUAL(num_completed, num_requests); 3715 3716 /* Check that the splitting is correct if bdev has interleaved metadata */ 3717 bdev->md_interleave = true; 3718 bdev->md_len = 64; 3719 bdev->blocklen = 4096 + 64; 3720 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3721 3722 num_requests = offset = 0; 3723 while (offset < num_blocks) { 3724 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3725 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3726 offset, num_io_blocks, 0); 3727 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3728 offset += num_io_blocks; 3729 num_requests++; 3730 } 3731 3732 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3733 CU_ASSERT_EQUAL(rc, 0); 3734 num_completed = stub_complete_io(num_requests); 3735 CU_ASSERT_EQUAL(num_completed, num_requests); 3736 num_completed = stub_complete_io(num_requests); 3737 CU_ASSERT_EQUAL(num_completed, 0); 3738 3739 /* Check the same for separate metadata buffer */ 3740 bdev->md_interleave = false; 3741 bdev->md_len = 64; 3742 bdev->blocklen = 4096; 3743 3744 num_requests = offset = 0; 3745 while (offset < num_blocks) { 3746 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 3747 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3748 offset, num_io_blocks, 0); 3749 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3750 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3751 offset += num_io_blocks; 3752 num_requests++; 3753 } 3754 3755 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3756 CU_ASSERT_EQUAL(rc, 0); 3757 num_completed = stub_complete_io(num_requests); 3758 CU_ASSERT_EQUAL(num_completed, num_requests); 3759 3760 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3761 spdk_put_io_channel(ioch); 3762 spdk_bdev_close(desc); 3763 free_bdev(bdev); 3764 spdk_bdev_finish(bdev_fini_cb, NULL); 3765 poll_threads(); 3766 } 3767 3768 static void 3769 bdev_zcopy_write(void) 3770 { 3771 struct spdk_bdev *bdev; 3772 struct spdk_bdev_desc *desc = NULL; 3773 struct spdk_io_channel *ioch; 3774 struct ut_expected_io *expected_io; 3775 uint64_t offset, num_blocks; 3776 uint32_t num_completed; 3777 char aa_buf[512]; 3778 struct iovec iov; 3779 int rc; 3780 const bool populate = false; 3781 const bool commit = true; 3782 3783 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3784 3785 spdk_bdev_initialize(bdev_init_cb, NULL); 3786 bdev = allocate_bdev("bdev"); 3787 3788 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3789 CU_ASSERT_EQUAL(rc, 0); 3790 SPDK_CU_ASSERT_FATAL(desc != NULL); 3791 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3792 ioch = spdk_bdev_get_io_channel(desc); 3793 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3794 3795 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3796 3797 offset = 50; 3798 num_blocks = 1; 3799 iov.iov_base = NULL; 3800 iov.iov_len = 0; 3801 3802 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 3803 g_zcopy_read_buf_len = (uint32_t) -1; 3804 /* Do a zcopy start for a write (populate=false) */ 3805 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3806
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3807 g_io_done = false; 3808 g_zcopy_write_buf = aa_buf; 3809 g_zcopy_write_buf_len = sizeof(aa_buf); 3810 g_zcopy_bdev_io = NULL; 3811 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3812 CU_ASSERT_EQUAL(rc, 0); 3813 num_completed = stub_complete_io(1); 3814 CU_ASSERT_EQUAL(num_completed, 1); 3815 CU_ASSERT(g_io_done == true); 3816 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3817 /* Check that the iov has been set up */ 3818 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 3819 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 3820 /* Check that the bdev_io has been saved */ 3821 CU_ASSERT(g_zcopy_bdev_io != NULL); 3822 /* Now do the zcopy end for a write (commit=true) */ 3823 g_io_done = false; 3824 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3825 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3826 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3827 CU_ASSERT_EQUAL(rc, 0); 3828 num_completed = stub_complete_io(1); 3829 CU_ASSERT_EQUAL(num_completed, 1); 3830 CU_ASSERT(g_io_done == true); 3831 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3832 /* Check that the g_zcopy write pointers are reset by io_done */ 3833 CU_ASSERT(g_zcopy_write_buf == NULL); 3834 CU_ASSERT(g_zcopy_write_buf_len == 0); 3835 /* Check that io_done has freed the g_zcopy_bdev_io */ 3836 CU_ASSERT(g_zcopy_bdev_io == NULL); 3837 3838 /* Check the zcopy read buffer has not been touched, which 3839 * ensures that the correct buffers were used. 3840 */ 3841 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 3842 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 3843 3844 spdk_put_io_channel(ioch); 3845 spdk_bdev_close(desc); 3846 free_bdev(bdev); 3847 spdk_bdev_finish(bdev_fini_cb, NULL); 3848 poll_threads(); 3849 } 3850 3851 static void 3852 bdev_zcopy_read(void) 3853 { 3854 struct spdk_bdev *bdev; 3855 struct spdk_bdev_desc *desc = NULL; 3856 struct spdk_io_channel *ioch; 3857 struct ut_expected_io *expected_io; 3858 uint64_t offset, num_blocks; 3859 uint32_t num_completed; 3860 char aa_buf[512]; 3861 struct iovec iov; 3862 int rc; 3863 const bool populate = true; 3864 const bool commit = false; 3865 3866 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3867 3868 spdk_bdev_initialize(bdev_init_cb, NULL); 3869 bdev = allocate_bdev("bdev"); 3870 3871 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3872 CU_ASSERT_EQUAL(rc, 0); 3873 SPDK_CU_ASSERT_FATAL(desc != NULL); 3874 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3875 ioch = spdk_bdev_get_io_channel(desc); 3876 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3877 3878 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3879 3880 offset = 50; 3881 num_blocks = 1; 3882 iov.iov_base = NULL; 3883 iov.iov_len = 0; 3884 3885 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 3886 g_zcopy_write_buf_len = (uint32_t) -1; 3887 3888 /* Do a zcopy start for a read (populate=true) */ 3889 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3890 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3891 g_io_done = false; 3892 g_zcopy_read_buf = aa_buf; 3893 g_zcopy_read_buf_len = sizeof(aa_buf); 3894 g_zcopy_bdev_io = NULL; 3895 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3896 CU_ASSERT_EQUAL(rc, 0); 3897 num_completed = stub_complete_io(1); 3898 CU_ASSERT_EQUAL(num_completed, 1); 3899
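/* With zcopy, the stub exposes its own buffer (g_zcopy_read_buf) rather than
 * copying data; the iov checks below verify that this buffer was handed back
 * to the caller. */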
CU_ASSERT(g_io_done == true); 3900 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3901 /* Check that the iov has been set up */ 3902 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 3903 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 3904 /* Check that the bdev_io has been saved */ 3905 CU_ASSERT(g_zcopy_bdev_io != NULL); 3906 3907 /* Now do the zcopy end for a read (commit=false) */ 3908 g_io_done = false; 3909 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3910 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3911 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3912 CU_ASSERT_EQUAL(rc, 0); 3913 num_completed = stub_complete_io(1); 3914 CU_ASSERT_EQUAL(num_completed, 1); 3915 CU_ASSERT(g_io_done == true); 3916 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3917 /* Check that the g_zcopy read pointers are reset by io_done */ 3918 CU_ASSERT(g_zcopy_read_buf == NULL); 3919 CU_ASSERT(g_zcopy_read_buf_len == 0); 3920 /* Check that io_done has freed the g_zcopy_bdev_io */ 3921 CU_ASSERT(g_zcopy_bdev_io == NULL); 3922 3923 /* Check the zcopy write buffer has not been touched, which 3924 * ensures that the correct buffers were used. 3925 */ 3926 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 3927 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 3928 3929 spdk_put_io_channel(ioch); 3930 spdk_bdev_close(desc); 3931 free_bdev(bdev); 3932 spdk_bdev_finish(bdev_fini_cb, NULL); 3933 poll_threads(); 3934 } 3935 3936 static void 3937 bdev_open_while_hotremove(void) 3938 { 3939 struct spdk_bdev *bdev; 3940 struct spdk_bdev_desc *desc[2] = {}; 3941 int rc; 3942 3943 bdev = allocate_bdev("bdev"); 3944 3945 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3946 CU_ASSERT(rc == 0); 3947 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3948 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3949 3950 spdk_bdev_unregister(bdev, NULL, NULL); 3951 /* Bdev unregister is handled asynchronously. Poll threads to complete.
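 * Once the unregister completes, opening the same bdev again is expected to
 * fail with -ENODEV.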
*/ 3952 poll_threads(); 3953 3954 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3955 CU_ASSERT(rc == -ENODEV); 3956 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3957 3958 spdk_bdev_close(desc[0]); 3959 free_bdev(bdev); 3960 } 3961 3962 static void 3963 bdev_close_while_hotremove(void) 3964 { 3965 struct spdk_bdev *bdev; 3966 struct spdk_bdev_desc *desc = NULL; 3967 int rc = 0; 3968 3969 bdev = allocate_bdev("bdev"); 3970 3971 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3972 CU_ASSERT_EQUAL(rc, 0); 3973 SPDK_CU_ASSERT_FATAL(desc != NULL); 3974 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3975 3976 /* Simulate hot-unplug by unregistering bdev */ 3977 g_event_type1 = 0xFF; 3978 g_unregister_arg = NULL; 3979 g_unregister_rc = -1; 3980 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 3981 /* Close device while remove event is in flight */ 3982 spdk_bdev_close(desc); 3983 3984 /* Ensure that unregister callback is delayed */ 3985 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 3986 CU_ASSERT_EQUAL(g_unregister_rc, -1); 3987 3988 poll_threads(); 3989 3990 /* Event callback shall not be issued because device was closed */ 3991 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 3992 /* Unregister callback is issued */ 3993 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 3994 CU_ASSERT_EQUAL(g_unregister_rc, 0); 3995 3996 free_bdev(bdev); 3997 } 3998 3999 static void 4000 bdev_open_ext(void) 4001 { 4002 struct spdk_bdev *bdev; 4003 struct spdk_bdev_desc *desc1 = NULL; 4004 struct spdk_bdev_desc *desc2 = NULL; 4005 int rc = 0; 4006 4007 bdev = allocate_bdev("bdev"); 4008 4009 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4010 CU_ASSERT_EQUAL(rc, -EINVAL); 4011 4012 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4013 CU_ASSERT_EQUAL(rc, 0); 4014 4015 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4016 CU_ASSERT_EQUAL(rc, 0); 4017 4018 g_event_type1 = 0xFF; 4019 g_event_type2 = 0xFF; 4020 4021 /* Simulate hot-unplug by unregistering bdev */ 4022 spdk_bdev_unregister(bdev, NULL, NULL); 4023 poll_threads(); 4024 4025 /* Check if correct events have been triggered in event callback fn */ 4026 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4027 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4028 4029 free_bdev(bdev); 4030 poll_threads(); 4031 } 4032 4033 static void 4034 bdev_open_ext_unregister(void) 4035 { 4036 struct spdk_bdev *bdev; 4037 struct spdk_bdev_desc *desc1 = NULL; 4038 struct spdk_bdev_desc *desc2 = NULL; 4039 struct spdk_bdev_desc *desc3 = NULL; 4040 struct spdk_bdev_desc *desc4 = NULL; 4041 int rc = 0; 4042 4043 bdev = allocate_bdev("bdev"); 4044 4045 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4046 CU_ASSERT_EQUAL(rc, -EINVAL); 4047 4048 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4049 CU_ASSERT_EQUAL(rc, 0); 4050 4051 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4052 CU_ASSERT_EQUAL(rc, 0); 4053 4054 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4055 CU_ASSERT_EQUAL(rc, 0); 4056 4057 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4058 CU_ASSERT_EQUAL(rc, 0); 4059 4060 g_event_type1 = 0xFF; 4061 g_event_type2 = 0xFF; 4062 g_event_type3 = 0xFF; 4063 g_event_type4 = 0xFF; 4064 4065 g_unregister_arg = NULL; 4066 g_unregister_rc = -1; 4067 4068 /* Simulate hot-unplug by unregistering bdev */ 4069 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void 
*)0x12345678); 4070 4071 /* 4072 * Unregister is handled asynchronously and event callback 4073 * (i.e., above bdev_open_cbN) will be called. 4074 * bdev_open_cb3 and bdev_open_cb4 intentionally do not 4075 * close desc3 and desc4, so the bdev is not closed yet. 4076 */ 4077 poll_threads(); 4078 4079 /* Check if correct events have been triggered in event callback fn */ 4080 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4081 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4082 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4083 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4084 4085 /* Check that unregister callback is delayed */ 4086 CU_ASSERT(g_unregister_arg == NULL); 4087 CU_ASSERT(g_unregister_rc == -1); 4088 4089 /* 4090 * Explicitly close desc3. As desc4 is still open, the 4091 * unregister callback is still delayed. 4092 */ 4093 spdk_bdev_close(desc3); 4094 CU_ASSERT(g_unregister_arg == NULL); 4095 CU_ASSERT(g_unregister_rc == -1); 4096 4097 /* 4098 * Explicitly close desc4 to trigger the ongoing bdev unregister 4099 * operation after the last desc is closed. 4100 */ 4101 spdk_bdev_close(desc4); 4102 4103 /* Poll the thread for the async unregister operation */ 4104 poll_threads(); 4105 4106 /* Check that unregister callback is executed */ 4107 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4108 CU_ASSERT(g_unregister_rc == 0); 4109 4110 free_bdev(bdev); 4111 poll_threads(); 4112 } 4113 4114 struct timeout_io_cb_arg { 4115 struct iovec iov; 4116 uint8_t type; 4117 }; 4118 4119 static int 4120 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4121 { 4122 struct spdk_bdev_io *bdev_io; 4123 int n = 0; 4124 4125 if (!ch) { 4126 return -1; 4127 } 4128 4129 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4130 n++; 4131 } 4132 4133 return n; 4134 } 4135 4136 static void 4137 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4138 { 4139 struct timeout_io_cb_arg *ctx = cb_arg; 4140 4141 ctx->type = bdev_io->type; 4142 ctx->iov.iov_base = bdev_io->iov.iov_base; 4143 ctx->iov.iov_len = bdev_io->iov.iov_len; 4144 } 4145 4146 static void 4147 bdev_set_io_timeout(void) 4148 { 4149 struct spdk_bdev *bdev; 4150 struct spdk_bdev_desc *desc = NULL; 4151 struct spdk_io_channel *io_ch = NULL; 4152 struct spdk_bdev_channel *bdev_ch = NULL; 4153 struct timeout_io_cb_arg cb_arg; 4154 4155 spdk_bdev_initialize(bdev_init_cb, NULL); 4156 4157 bdev = allocate_bdev("bdev"); 4158 4159 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4160 SPDK_CU_ASSERT_FATAL(desc != NULL); 4161 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4162 4163 io_ch = spdk_bdev_get_io_channel(desc); 4164 CU_ASSERT(io_ch != NULL); 4165 4166 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4167 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4168 4169 /* This is part 1.
4170 * We will check the bdev_ch->io_submitted list 4171 * to make sure that it links the submitted I/Os, and only the submitted I/Os 4172 */ 4173 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4174 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4175 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4176 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4177 stub_complete_io(1); 4178 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4179 stub_complete_io(1); 4180 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4181 4182 /* Split IO */ 4183 bdev->optimal_io_boundary = 16; 4184 bdev->split_on_optimal_io_boundary = true; 4185 4186 /* Now test that a single-vector command is split correctly. 4187 * Offset 14, length 8, payload 0xF000 4188 * Child - Offset 14, length 2, payload 0xF000 4189 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4190 * 4191 * Set up the expected values before calling spdk_bdev_read_blocks 4192 */ 4193 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4194 /* We count all submitted I/Os including those generated by splitting. */ 4195 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4196 stub_complete_io(1); 4197 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4198 stub_complete_io(1); 4199 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4200 4201 /* Also include the reset IO */ 4202 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4203 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4204 poll_threads(); 4205 stub_complete_io(1); 4206 poll_threads(); 4207 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4208 4209 /* This is part 2. 4210 * Test the desc timeout poller registration. 4211 */ 4212 4213 /* Successfully set the timeout */ 4214 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4215 CU_ASSERT(desc->io_timeout_poller != NULL); 4216 CU_ASSERT(desc->timeout_in_sec == 30); 4217 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4218 CU_ASSERT(desc->cb_arg == &cb_arg); 4219 4220 /* Change the timeout limit */ 4221 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4222 CU_ASSERT(desc->io_timeout_poller != NULL); 4223 CU_ASSERT(desc->timeout_in_sec == 20); 4224 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4225 CU_ASSERT(desc->cb_arg == &cb_arg); 4226 4227 /* Disable the timeout */ 4228 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4229 CU_ASSERT(desc->io_timeout_poller == NULL); 4230 4231 /* This is part 3. 4232 * We will test catching a timed-out I/O and check whether the I/O is 4233 * the submitted one.
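 * The desc's timeout poller walks the channel's outstanding I/Os and invokes
 * the registered callback for each I/O that has been outstanding longer than
 * timeout_in_sec.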
4234 */ 4235 memset(&cb_arg, 0, sizeof(cb_arg)); 4236 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4237 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4238 4239 /* The limit is not reached yet */ 4240 spdk_delay_us(15 * spdk_get_ticks_hz()); 4241 poll_threads(); 4242 CU_ASSERT(cb_arg.type == 0); 4243 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4244 CU_ASSERT(cb_arg.iov.iov_len == 0); 4245 4246 /* 15 + 15 = 30 reaches the limit */ 4247 spdk_delay_us(15 * spdk_get_ticks_hz()); 4248 poll_threads(); 4249 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4250 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4251 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4252 stub_complete_io(1); 4253 4254 /* Use the same split I/O as above and check the I/O */ 4255 memset(&cb_arg, 0, sizeof(cb_arg)); 4256 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4257 4258 /* The first child completes in time */ 4259 spdk_delay_us(15 * spdk_get_ticks_hz()); 4260 poll_threads(); 4261 stub_complete_io(1); 4262 CU_ASSERT(cb_arg.type == 0); 4263 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4264 CU_ASSERT(cb_arg.iov.iov_len == 0); 4265 4266 /* The second child reaches the limit */ 4267 spdk_delay_us(15 * spdk_get_ticks_hz()); 4268 poll_threads(); 4269 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4270 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4271 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4272 stub_complete_io(1); 4273 4274 /* Also include the reset IO */ 4275 memset(&cb_arg, 0, sizeof(cb_arg)); 4276 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4277 spdk_delay_us(30 * spdk_get_ticks_hz()); 4278 poll_threads(); 4279 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4280 stub_complete_io(1); 4281 poll_threads(); 4282 4283 spdk_put_io_channel(io_ch); 4284 spdk_bdev_close(desc); 4285 free_bdev(bdev); 4286 spdk_bdev_finish(bdev_fini_cb, NULL); 4287 poll_threads(); 4288 } 4289 4290 static void 4291 bdev_set_qd_sampling(void) 4292 { 4293 struct spdk_bdev *bdev; 4294 struct spdk_bdev_desc *desc = NULL; 4295 struct spdk_io_channel *io_ch = NULL; 4296 struct spdk_bdev_channel *bdev_ch = NULL; 4297 struct timeout_io_cb_arg cb_arg; 4298 4299 spdk_bdev_initialize(bdev_init_cb, NULL); 4300 4301 bdev = allocate_bdev("bdev"); 4302 4303 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4304 SPDK_CU_ASSERT_FATAL(desc != NULL); 4305 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4306 4307 io_ch = spdk_bdev_get_io_channel(desc); 4308 CU_ASSERT(io_ch != NULL); 4309 4310 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4311 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4312 4313 /* This is part 1. 4314 * We will check the bdev_ch->io_submitted list 4315 * to make sure that it links the submitted I/Os, and only the submitted I/Os 4316 */ 4317 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4318 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4319 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4320 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4321 stub_complete_io(1); 4322 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4323 stub_complete_io(1); 4324 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4325 4326 /* This is part 2.
4327 * Test the bdev's qd poller registration. 4328 */ 4329 /* 1st Successfully set the qd sampling period */ 4330 spdk_bdev_set_qd_sampling_period(bdev, 10); 4331 CU_ASSERT(bdev->internal.new_period == 10); 4332 CU_ASSERT(bdev->internal.period == 10); 4333 CU_ASSERT(bdev->internal.qd_desc != NULL); 4334 poll_threads(); 4335 CU_ASSERT(bdev->internal.qd_poller != NULL); 4336 4337 /* 2nd Change the qd sampling period */ 4338 spdk_bdev_set_qd_sampling_period(bdev, 20); 4339 CU_ASSERT(bdev->internal.new_period == 20); 4340 CU_ASSERT(bdev->internal.period == 10); 4341 CU_ASSERT(bdev->internal.qd_desc != NULL); 4342 poll_threads(); 4343 CU_ASSERT(bdev->internal.qd_poller != NULL); 4344 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4345 4346 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4347 spdk_delay_us(20); 4348 poll_thread_times(0, 1); 4349 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4350 spdk_bdev_set_qd_sampling_period(bdev, 30); 4351 CU_ASSERT(bdev->internal.new_period == 30); 4352 CU_ASSERT(bdev->internal.period == 20); 4353 poll_threads(); 4354 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4355 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4356 4357 /* 4th Disable the qd sampling period */ 4358 spdk_bdev_set_qd_sampling_period(bdev, 0); 4359 CU_ASSERT(bdev->internal.new_period == 0); 4360 CU_ASSERT(bdev->internal.period == 30); 4361 poll_threads(); 4362 CU_ASSERT(bdev->internal.qd_poller == NULL); 4363 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4364 CU_ASSERT(bdev->internal.qd_desc == NULL); 4365 4366 /* This is part 3. 4367 * We will test that the submitted I/O and reset work 4368 * properly with the qd sampling. 4369 */ 4370 memset(&cb_arg, 0, sizeof(cb_arg)); 4371 spdk_bdev_set_qd_sampling_period(bdev, 1); 4372 poll_threads(); 4373 4374 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4375 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4376 4377 /* Also include the reset IO */ 4378 memset(&cb_arg, 0, sizeof(cb_arg)); 4379 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4380 poll_threads(); 4381 4382 /* Close the desc */ 4383 spdk_put_io_channel(io_ch); 4384 spdk_bdev_close(desc); 4385 4386 /* Complete the submitted IO and reset */ 4387 stub_complete_io(2); 4388 poll_threads(); 4389 4390 free_bdev(bdev); 4391 spdk_bdev_finish(bdev_fini_cb, NULL); 4392 poll_threads(); 4393 } 4394 4395 static void 4396 lba_range_overlap(void) 4397 { 4398 struct lba_range r1, r2; 4399 4400 r1.offset = 100; 4401 r1.length = 50; 4402 4403 r2.offset = 0; 4404 r2.length = 1; 4405 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4406 4407 r2.offset = 0; 4408 r2.length = 100; 4409 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4410 4411 r2.offset = 0; 4412 r2.length = 110; 4413 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4414 4415 r2.offset = 100; 4416 r2.length = 10; 4417 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4418 4419 r2.offset = 110; 4420 r2.length = 20; 4421 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4422 4423 r2.offset = 140; 4424 r2.length = 150; 4425 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4426 4427 r2.offset = 130; 4428 r2.length = 200; 4429 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4430 4431 r2.offset = 150; 4432 r2.length = 100; 4433 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4434 4435 r2.offset = 110; 4436 r2.length = 0; 4437 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4438 } 4439 4440 static bool
g_lock_lba_range_done; 4441 static bool g_unlock_lba_range_done; 4442 4443 static void 4444 lock_lba_range_done(void *ctx, int status) 4445 { 4446 g_lock_lba_range_done = true; 4447 } 4448 4449 static void 4450 unlock_lba_range_done(void *ctx, int status) 4451 { 4452 g_unlock_lba_range_done = true; 4453 } 4454 4455 static void 4456 lock_lba_range_check_ranges(void) 4457 { 4458 struct spdk_bdev *bdev; 4459 struct spdk_bdev_desc *desc = NULL; 4460 struct spdk_io_channel *io_ch; 4461 struct spdk_bdev_channel *channel; 4462 struct lba_range *range; 4463 int ctx1; 4464 int rc; 4465 4466 spdk_bdev_initialize(bdev_init_cb, NULL); 4467 4468 bdev = allocate_bdev("bdev0"); 4469 4470 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4471 CU_ASSERT(rc == 0); 4472 CU_ASSERT(desc != NULL); 4473 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4474 io_ch = spdk_bdev_get_io_channel(desc); 4475 CU_ASSERT(io_ch != NULL); 4476 channel = spdk_io_channel_get_ctx(io_ch); 4477 4478 g_lock_lba_range_done = false; 4479 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4480 CU_ASSERT(rc == 0); 4481 poll_threads(); 4482 4483 CU_ASSERT(g_lock_lba_range_done == true); 4484 range = TAILQ_FIRST(&channel->locked_ranges); 4485 SPDK_CU_ASSERT_FATAL(range != NULL); 4486 CU_ASSERT(range->offset == 20); 4487 CU_ASSERT(range->length == 10); 4488 CU_ASSERT(range->owner_ch == channel); 4489 4490 /* Unlocks must exactly match a lock. */ 4491 g_unlock_lba_range_done = false; 4492 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4493 CU_ASSERT(rc == -EINVAL); 4494 CU_ASSERT(g_unlock_lba_range_done == false); 4495 4496 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4497 CU_ASSERT(rc == 0); 4498 spdk_delay_us(100); 4499 poll_threads(); 4500 4501 CU_ASSERT(g_unlock_lba_range_done == true); 4502 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4503 4504 spdk_put_io_channel(io_ch); 4505 spdk_bdev_close(desc); 4506 free_bdev(bdev); 4507 spdk_bdev_finish(bdev_fini_cb, NULL); 4508 poll_threads(); 4509 } 4510 4511 static void 4512 lock_lba_range_with_io_outstanding(void) 4513 { 4514 struct spdk_bdev *bdev; 4515 struct spdk_bdev_desc *desc = NULL; 4516 struct spdk_io_channel *io_ch; 4517 struct spdk_bdev_channel *channel; 4518 struct lba_range *range; 4519 char buf[4096]; 4520 int ctx1; 4521 int rc; 4522 4523 spdk_bdev_initialize(bdev_init_cb, NULL); 4524 4525 bdev = allocate_bdev("bdev0"); 4526 4527 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4528 CU_ASSERT(rc == 0); 4529 CU_ASSERT(desc != NULL); 4530 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4531 io_ch = spdk_bdev_get_io_channel(desc); 4532 CU_ASSERT(io_ch != NULL); 4533 channel = spdk_io_channel_get_ctx(io_ch); 4534 4535 g_io_done = false; 4536 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4537 CU_ASSERT(rc == 0); 4538 4539 g_lock_lba_range_done = false; 4540 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4541 CU_ASSERT(rc == 0); 4542 poll_threads(); 4543 4544 /* The lock should immediately become valid, since there are no outstanding 4545 * write I/O. 
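 * Outstanding reads do not delay the lock; only write I/O must drain before
 * the lock callback fires.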
4546 */ 4547 CU_ASSERT(g_io_done == false); 4548 CU_ASSERT(g_lock_lba_range_done == true); 4549 range = TAILQ_FIRST(&channel->locked_ranges); 4550 SPDK_CU_ASSERT_FATAL(range != NULL); 4551 CU_ASSERT(range->offset == 20); 4552 CU_ASSERT(range->length == 10); 4553 CU_ASSERT(range->owner_ch == channel); 4554 CU_ASSERT(range->locked_ctx == &ctx1); 4555 4556 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4557 CU_ASSERT(rc == 0); 4558 stub_complete_io(1); 4559 spdk_delay_us(100); 4560 poll_threads(); 4561 4562 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4563 4564 /* Now try again, but with a write I/O. */ 4565 g_io_done = false; 4566 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4567 CU_ASSERT(rc == 0); 4568 4569 g_lock_lba_range_done = false; 4570 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4571 CU_ASSERT(rc == 0); 4572 poll_threads(); 4573 4574 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4575 * But note that the range should be on the channel's locked_list, to make sure no 4576 * new write I/O are started. 4577 */ 4578 CU_ASSERT(g_io_done == false); 4579 CU_ASSERT(g_lock_lba_range_done == false); 4580 range = TAILQ_FIRST(&channel->locked_ranges); 4581 SPDK_CU_ASSERT_FATAL(range != NULL); 4582 CU_ASSERT(range->offset == 20); 4583 CU_ASSERT(range->length == 10); 4584 4585 /* Complete the write I/O. This should make the lock valid (checked by confirming 4586 * our callback was invoked). 4587 */ 4588 stub_complete_io(1); 4589 spdk_delay_us(100); 4590 poll_threads(); 4591 CU_ASSERT(g_io_done == true); 4592 CU_ASSERT(g_lock_lba_range_done == true); 4593 4594 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4595 CU_ASSERT(rc == 0); 4596 poll_threads(); 4597 4598 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4599 4600 spdk_put_io_channel(io_ch); 4601 spdk_bdev_close(desc); 4602 free_bdev(bdev); 4603 spdk_bdev_finish(bdev_fini_cb, NULL); 4604 poll_threads(); 4605 } 4606 4607 static void 4608 lock_lba_range_overlapped(void) 4609 { 4610 struct spdk_bdev *bdev; 4611 struct spdk_bdev_desc *desc = NULL; 4612 struct spdk_io_channel *io_ch; 4613 struct spdk_bdev_channel *channel; 4614 struct lba_range *range; 4615 int ctx1; 4616 int rc; 4617 4618 spdk_bdev_initialize(bdev_init_cb, NULL); 4619 4620 bdev = allocate_bdev("bdev0"); 4621 4622 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4623 CU_ASSERT(rc == 0); 4624 CU_ASSERT(desc != NULL); 4625 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4626 io_ch = spdk_bdev_get_io_channel(desc); 4627 CU_ASSERT(io_ch != NULL); 4628 channel = spdk_io_channel_get_ctx(io_ch); 4629 4630 /* Lock range 20-29. */ 4631 g_lock_lba_range_done = false; 4632 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4633 CU_ASSERT(rc == 0); 4634 poll_threads(); 4635 4636 CU_ASSERT(g_lock_lba_range_done == true); 4637 range = TAILQ_FIRST(&channel->locked_ranges); 4638 SPDK_CU_ASSERT_FATAL(range != NULL); 4639 CU_ASSERT(range->offset == 20); 4640 CU_ASSERT(range->length == 10); 4641 4642 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4643 * 20-29. 
4644 */ 4645 g_lock_lba_range_done = false; 4646 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4647 CU_ASSERT(rc == 0); 4648 poll_threads(); 4649 4650 CU_ASSERT(g_lock_lba_range_done == false); 4651 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4652 SPDK_CU_ASSERT_FATAL(range != NULL); 4653 CU_ASSERT(range->offset == 25); 4654 CU_ASSERT(range->length == 15); 4655 4656 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4657 * no longer overlaps with an active lock. 4658 */ 4659 g_unlock_lba_range_done = false; 4660 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4661 CU_ASSERT(rc == 0); 4662 poll_threads(); 4663 4664 CU_ASSERT(g_unlock_lba_range_done == true); 4665 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4666 range = TAILQ_FIRST(&channel->locked_ranges); 4667 SPDK_CU_ASSERT_FATAL(range != NULL); 4668 CU_ASSERT(range->offset == 25); 4669 CU_ASSERT(range->length == 15); 4670 4671 /* Lock 40-59. This should immediately lock since it does not overlap with the 4672 * currently active 25-39 lock. 4673 */ 4674 g_lock_lba_range_done = false; 4675 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4676 CU_ASSERT(rc == 0); 4677 poll_threads(); 4678 4679 CU_ASSERT(g_lock_lba_range_done == true); 4680 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4681 SPDK_CU_ASSERT_FATAL(range != NULL); 4682 range = TAILQ_NEXT(range, tailq); 4683 SPDK_CU_ASSERT_FATAL(range != NULL); 4684 CU_ASSERT(range->offset == 40); 4685 CU_ASSERT(range->length == 20); 4686 4687 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4688 g_lock_lba_range_done = false; 4689 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4690 CU_ASSERT(rc == 0); 4691 poll_threads(); 4692 4693 CU_ASSERT(g_lock_lba_range_done == false); 4694 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4695 SPDK_CU_ASSERT_FATAL(range != NULL); 4696 CU_ASSERT(range->offset == 35); 4697 CU_ASSERT(range->length == 10); 4698 4699 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4700 * the 40-59 lock is still active. 4701 */ 4702 g_unlock_lba_range_done = false; 4703 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4704 CU_ASSERT(rc == 0); 4705 poll_threads(); 4706 4707 CU_ASSERT(g_unlock_lba_range_done == true); 4708 CU_ASSERT(g_lock_lba_range_done == false); 4709 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4710 SPDK_CU_ASSERT_FATAL(range != NULL); 4711 CU_ASSERT(range->offset == 35); 4712 CU_ASSERT(range->length == 10); 4713 4714 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4715 * no longer any active overlapping locks. 4716 */ 4717 g_unlock_lba_range_done = false; 4718 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4719 CU_ASSERT(rc == 0); 4720 poll_threads(); 4721 4722 CU_ASSERT(g_unlock_lba_range_done == true); 4723 CU_ASSERT(g_lock_lba_range_done == true); 4724 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4725 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4726 SPDK_CU_ASSERT_FATAL(range != NULL); 4727 CU_ASSERT(range->offset == 35); 4728 CU_ASSERT(range->length == 10); 4729 4730 /* Finally, unlock 35-44. 
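 * Once it is released, no locks remain and locked_ranges should end up empty.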
*/ 4731 g_unlock_lba_range_done = false; 4732 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4733 CU_ASSERT(rc == 0); 4734 poll_threads(); 4735 4736 CU_ASSERT(g_unlock_lba_range_done == true); 4737 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4738 4739 spdk_put_io_channel(io_ch); 4740 spdk_bdev_close(desc); 4741 free_bdev(bdev); 4742 spdk_bdev_finish(bdev_fini_cb, NULL); 4743 poll_threads(); 4744 } 4745 4746 static void 4747 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4748 { 4749 g_abort_done = true; 4750 g_abort_status = bdev_io->internal.status; 4751 spdk_bdev_free_io(bdev_io); 4752 } 4753 4754 static void 4755 bdev_io_abort(void) 4756 { 4757 struct spdk_bdev *bdev; 4758 struct spdk_bdev_desc *desc = NULL; 4759 struct spdk_io_channel *io_ch; 4760 struct spdk_bdev_channel *channel; 4761 struct spdk_bdev_mgmt_channel *mgmt_ch; 4762 struct spdk_bdev_opts bdev_opts = {}; 4763 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4764 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4765 int rc; 4766 4767 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4768 bdev_opts.bdev_io_pool_size = 7; 4769 bdev_opts.bdev_io_cache_size = 2; 4770 4771 rc = spdk_bdev_set_opts(&bdev_opts); 4772 CU_ASSERT(rc == 0); 4773 spdk_bdev_initialize(bdev_init_cb, NULL); 4774 4775 bdev = allocate_bdev("bdev0"); 4776 4777 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4778 CU_ASSERT(rc == 0); 4779 CU_ASSERT(desc != NULL); 4780 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4781 io_ch = spdk_bdev_get_io_channel(desc); 4782 CU_ASSERT(io_ch != NULL); 4783 channel = spdk_io_channel_get_ctx(io_ch); 4784 mgmt_ch = channel->shared_resource->mgmt_ch; 4785 4786 g_abort_done = false; 4787 4788 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4789 4790 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4791 CU_ASSERT(rc == -ENOTSUP); 4792 4793 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4794 4795 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4796 CU_ASSERT(rc == 0); 4797 CU_ASSERT(g_abort_done == true); 4798 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4799 4800 /* Test the case that the target I/O was successfully aborted. */ 4801 g_io_done = false; 4802 4803 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4804 CU_ASSERT(rc == 0); 4805 CU_ASSERT(g_io_done == false); 4806 4807 g_abort_done = false; 4808 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4809 4810 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4811 CU_ASSERT(rc == 0); 4812 CU_ASSERT(g_io_done == true); 4813 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4814 stub_complete_io(1); 4815 CU_ASSERT(g_abort_done == true); 4816 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4817 4818 /* Test the case that the target I/O was not aborted because it completed 4819 * in the middle of execution of the abort. 
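 * The target I/O completes with success before the abort reaches it, and the
 * abort is still reported as successful to its submitter.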
4820 */ 4821 g_io_done = false; 4822 4823 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4824 CU_ASSERT(rc == 0); 4825 CU_ASSERT(g_io_done == false); 4826 4827 g_abort_done = false; 4828 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4829 4830 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4831 CU_ASSERT(rc == 0); 4832 CU_ASSERT(g_io_done == false); 4833 4834 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4835 stub_complete_io(1); 4836 CU_ASSERT(g_io_done == true); 4837 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4838 4839 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4840 stub_complete_io(1); 4841 CU_ASSERT(g_abort_done == true); 4842 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4843 4844 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4845 4846 bdev->optimal_io_boundary = 16; 4847 bdev->split_on_optimal_io_boundary = true; 4848 4849 /* Test that a single-vector command which is split is aborted correctly. 4850 * Offset 14, length 8, payload 0xF000 4851 * Child - Offset 14, length 2, payload 0xF000 4852 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4853 */ 4854 g_io_done = false; 4855 4856 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 4857 CU_ASSERT(rc == 0); 4858 CU_ASSERT(g_io_done == false); 4859 4860 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 4861 4862 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4863 4864 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4865 CU_ASSERT(rc == 0); 4866 CU_ASSERT(g_io_done == true); 4867 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4868 stub_complete_io(2); 4869 CU_ASSERT(g_abort_done == true); 4870 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4871 4872 /* Test that a multi-vector command that needs to be split both on the optimal 4873 * I/O boundary and on the child iov capacity is aborted correctly. The abort is 4874 * requested before the second child I/O is submitted. The parent I/O should 4875 * complete with failure without submitting the second child I/O. 4876 */ 4877 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 4878 iov[i].iov_base = (void *)((i + 1) * 0x10000); 4879 iov[i].iov_len = 512; 4880 } 4881 4882 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 4883 g_io_done = false; 4884 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 4885 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 4886 CU_ASSERT(rc == 0); 4887 CU_ASSERT(g_io_done == false); 4888 4889 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4890 4891 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4892 4893 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4894 CU_ASSERT(rc == 0); 4895 CU_ASSERT(g_io_done == true); 4896 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4897 stub_complete_io(1); 4898 CU_ASSERT(g_abort_done == true); 4899 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4900 4901 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4902 4903 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4904 4905 bdev->optimal_io_boundary = 16; 4906 g_io_done = false; 4907 4908 /* Test that a single-vector command which is split is aborted correctly. 4909 * Unlike the above cases, the child abort requests will be submitted 4910 * sequentially due to the limited capacity of the spdk_bdev_io pool.
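 * With bdev_io_pool_size set to 7 at the top of this test, the split read
 * (one parent plus four children) leaves too few spdk_bdev_io structures for
 * all of the child abort requests at once, so some of them must wait on
 * mgmt_ch->io_wait_queue and are submitted as earlier ones complete.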
4911 */ 4912 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 4913 CU_ASSERT(rc == 0); 4914 CU_ASSERT(g_io_done == false); 4915 4916 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4917 4918 g_abort_done = false; 4919 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4920 4921 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4922 CU_ASSERT(rc == 0); 4923 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 4924 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4925 4926 stub_complete_io(1); 4927 CU_ASSERT(g_io_done == true); 4928 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4929 stub_complete_io(3); 4930 CU_ASSERT(g_abort_done == true); 4931 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4932 4933 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4934 4935 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4936 4937 spdk_put_io_channel(io_ch); 4938 spdk_bdev_close(desc); 4939 free_bdev(bdev); 4940 spdk_bdev_finish(bdev_fini_cb, NULL); 4941 poll_threads(); 4942 } 4943 4944 static void 4945 bdev_unmap(void) 4946 { 4947 struct spdk_bdev *bdev; 4948 struct spdk_bdev_desc *desc = NULL; 4949 struct spdk_io_channel *ioch; 4950 struct spdk_bdev_channel *bdev_ch; 4951 struct ut_expected_io *expected_io; 4952 struct spdk_bdev_opts bdev_opts = {}; 4953 uint32_t i, num_outstanding; 4954 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 4955 int rc; 4956 4957 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4958 bdev_opts.bdev_io_pool_size = 512; 4959 bdev_opts.bdev_io_cache_size = 64; 4960 rc = spdk_bdev_set_opts(&bdev_opts); 4961 CU_ASSERT(rc == 0); 4962 4963 spdk_bdev_initialize(bdev_init_cb, NULL); 4964 bdev = allocate_bdev("bdev"); 4965 4966 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4967 CU_ASSERT_EQUAL(rc, 0); 4968 SPDK_CU_ASSERT_FATAL(desc != NULL); 4969 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4970 ioch = spdk_bdev_get_io_channel(desc); 4971 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4972 bdev_ch = spdk_io_channel_get_ctx(ioch); 4973 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4974 4975 fn_table.submit_request = stub_submit_request; 4976 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4977 4978 /* Case 1: First, test a request that won't be split */ 4979 num_blocks = 32; 4980 4981 g_io_done = false; 4982 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 4983 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4984 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4985 CU_ASSERT_EQUAL(rc, 0); 4986 CU_ASSERT(g_io_done == false); 4987 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 4988 stub_complete_io(1); 4989 CU_ASSERT(g_io_done == true); 4990 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4991 4992 /* Case 2: Test the split into 2 child requests */ 4993 bdev->max_unmap = 8; 4994 bdev->max_unmap_segments = 2; 4995 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 4996 num_blocks = max_unmap_blocks * 2; 4997 offset = 0; 4998 4999 g_io_done = false; 5000 for (i = 0; i < 2; i++) { 5001 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5002 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5003 offset += max_unmap_blocks; 5004 } 5005 5006 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5007 CU_ASSERT_EQUAL(rc, 0); 5008 CU_ASSERT(g_io_done == false); 5009
CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5010 stub_complete_io(2); 5011 CU_ASSERT(g_io_done == true); 5012 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5013 5014 /* Case 3: Test the split into 15 child requests; the first 8 complete before the rest are submitted */ 5015 num_children = 15; 5016 num_blocks = max_unmap_blocks * num_children; 5017 g_io_done = false; 5018 offset = 0; 5019 for (i = 0; i < num_children; i++) { 5020 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5021 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5022 offset += max_unmap_blocks; 5023 } 5024 5025 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5026 CU_ASSERT_EQUAL(rc, 0); 5027 CU_ASSERT(g_io_done == false); 5028 5029 while (num_children > 0) { 5030 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5031 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5032 stub_complete_io(num_outstanding); 5033 num_children -= num_outstanding; 5034 } 5035 CU_ASSERT(g_io_done == true); 5036 5037 spdk_put_io_channel(ioch); 5038 spdk_bdev_close(desc); 5039 free_bdev(bdev); 5040 spdk_bdev_finish(bdev_fini_cb, NULL); 5041 poll_threads(); 5042 } 5043 5044 static void 5045 bdev_write_zeroes_split_test(void) 5046 { 5047 struct spdk_bdev *bdev; 5048 struct spdk_bdev_desc *desc = NULL; 5049 struct spdk_io_channel *ioch; 5050 struct spdk_bdev_channel *bdev_ch; 5051 struct ut_expected_io *expected_io; 5052 struct spdk_bdev_opts bdev_opts = {}; 5053 uint32_t i, num_outstanding; 5054 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5055 int rc; 5056 5057 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5058 bdev_opts.bdev_io_pool_size = 512; 5059 bdev_opts.bdev_io_cache_size = 64; 5060 rc = spdk_bdev_set_opts(&bdev_opts); 5061 CU_ASSERT(rc == 0); 5062 5063 spdk_bdev_initialize(bdev_init_cb, NULL); 5064 bdev = allocate_bdev("bdev"); 5065 5066 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5067 CU_ASSERT_EQUAL(rc, 0); 5068 SPDK_CU_ASSERT_FATAL(desc != NULL); 5069 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5070 ioch = spdk_bdev_get_io_channel(desc); 5071 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5072 bdev_ch = spdk_io_channel_get_ctx(ioch); 5073 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5074 5075 fn_table.submit_request = stub_submit_request; 5076 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5077 5078 /* Case 1: First, test a request that won't be split */ 5079 num_blocks = 32; 5080 5081 g_io_done = false; 5082 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5083 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5084 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5085 CU_ASSERT_EQUAL(rc, 0); 5086 CU_ASSERT(g_io_done == false); 5087 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5088 stub_complete_io(1); 5089 CU_ASSERT(g_io_done == true); 5090 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5091 5092 /* Case 2: Test the split into 2 child requests */ 5093 max_write_zeroes_blocks = 8; 5094 bdev->max_write_zeroes = max_write_zeroes_blocks; 5095 num_blocks = max_write_zeroes_blocks * 2; 5096 offset = 0; 5097 5098 g_io_done = false; 5099 for (i = 0; i < 2; i++) { 5100 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5101 0); 5102 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io,
expected_io, link); 5103 offset += max_write_zeroes_blocks; 5104 } 5105 5106 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5107 CU_ASSERT_EQUAL(rc, 0); 5108 CU_ASSERT(g_io_done == false); 5109 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5110 stub_complete_io(2); 5111 CU_ASSERT(g_io_done == true); 5112 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5113 5114 /* Case 3: Test the split into 15 child requests; the first 8 complete before the rest are submitted */ 5115 num_children = 15; 5116 num_blocks = max_write_zeroes_blocks * num_children; 5117 g_io_done = false; 5118 offset = 0; 5119 for (i = 0; i < num_children; i++) { 5120 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5121 0); 5122 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5123 offset += max_write_zeroes_blocks; 5124 } 5125 5126 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5127 CU_ASSERT_EQUAL(rc, 0); 5128 CU_ASSERT(g_io_done == false); 5129 5130 while (num_children > 0) { 5131 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5132 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5133 stub_complete_io(num_outstanding); 5134 num_children -= num_outstanding; 5135 } 5136 CU_ASSERT(g_io_done == true); 5137 5138 spdk_put_io_channel(ioch); 5139 spdk_bdev_close(desc); 5140 free_bdev(bdev); 5141 spdk_bdev_finish(bdev_fini_cb, NULL); 5142 poll_threads(); 5143 } 5144 5145 static void 5146 bdev_set_options_test(void) 5147 { 5148 struct spdk_bdev_opts bdev_opts = {}; 5149 int rc; 5150 5151 /* Case 1: Do not set opts_size */ 5152 rc = spdk_bdev_set_opts(&bdev_opts); 5153 CU_ASSERT(rc == -1); 5154 5155 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5156 bdev_opts.bdev_io_pool_size = 4; 5157 bdev_opts.bdev_io_cache_size = 2; 5158 bdev_opts.small_buf_pool_size = 4; 5159 5160 /* Case 2: Set an invalid small_buf_pool_size */ 5161 rc = spdk_bdev_set_opts(&bdev_opts); 5162 CU_ASSERT(rc == -1); 5163 5164 /* Case 3: Set an invalid large_buf_pool_size */ 5165 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 5166 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 5167 rc = spdk_bdev_set_opts(&bdev_opts); 5168 CU_ASSERT(rc == -1); 5169 5170 /* Case 4: Set a valid large_buf_pool_size */ 5171 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 5172 rc = spdk_bdev_set_opts(&bdev_opts); 5173 CU_ASSERT(rc == 0); 5174 5175 /* Case 5: Set different valid values for the small and large buf pools */ 5176 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 5177 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 5178 rc = spdk_bdev_set_opts(&bdev_opts); 5179 CU_ASSERT(rc == 0); 5180 } 5181 5182 static uint64_t 5183 get_ns_time(void) 5184 { 5185 int rc; 5186 struct timespec ts; 5187 5188 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 5189 CU_ASSERT(rc == 0); 5190 return (uint64_t)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 5191 } 5192 5193 static int 5194 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 5195 { 5196 int h1, h2; 5197 5198 if (bdev_name == NULL) { 5199 return -1; 5200 } else { 5201 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 5202 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 5203 5204 return spdk_max(h1, h2) + 1; 5205 } 5206 } 5207 5208 static void 5209 bdev_multi_allocation(void) 5210 { 5211 const int max_bdev_num = 1024 * 16; 5212 char name[max_bdev_num][16]; 5213 char noexist_name[] = "invalid_bdev";
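	/*
	 * This test registers bdevs in 1024-name batches (up to 16K in total) and,
	 * after every insertion, asserts that the height of the bdev name RB tree
	 * stays logarithmic in the number of registered names (an empirical balance
	 * bound, not the formal red-black worst case). get_ns_time() is used only
	 * to log how long each batch takes to register.
	 */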
5214 struct spdk_bdev *bdev[max_bdev_num]; 5215 int i, j; 5216 uint64_t last_time; 5217 int bdev_num; 5218 int height; 5219 5220 for (j = 0; j < max_bdev_num; j++) { 5221 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 5222 } 5223 5224 for (i = 0; i < 16; i++) { 5225 last_time = get_ns_time(); 5226 bdev_num = 1024 * (i + 1); 5227 for (j = 0; j < bdev_num; j++) { 5228 bdev[j] = allocate_bdev(name[j]); 5229 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 5230 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 5231 } 5232 SPDK_NOTICELOG("allocating %d bdevs took %" PRIu64 " ms\n", bdev_num, 5233 (get_ns_time() - last_time) / 1000 / 1000); 5234 for (j = 0; j < bdev_num; j++) { 5235 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 5236 } 5237 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 5238 5239 for (j = 0; j < bdev_num; j++) { 5240 free_bdev(bdev[j]); 5241 } 5242 for (j = 0; j < bdev_num; j++) { 5243 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 5244 } 5245 } 5246 } 5247 5248 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5249 5250 static int 5251 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5252 int array_size) 5253 { 5254 if (array_size > 0 && domains) { 5255 domains[0] = g_bdev_memory_domain; 5256 } 5257 5258 return 1; 5259 } 5260 5261 static void 5262 bdev_get_memory_domains(void) 5263 { 5264 struct spdk_bdev_fn_table fn_table = { 5265 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5266 }; 5267 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5268 struct spdk_memory_domain *domains[2] = {}; 5269 int rc; 5270 5271 /* bdev is NULL */ 5272 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5273 CU_ASSERT(rc == -EINVAL); 5274 5275 /* domains is NULL */ 5276 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5277 CU_ASSERT(rc == 1); 5278 5279 /* array size is 0 */ 5280 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5281 CU_ASSERT(rc == 1); 5282 5283 /* get_memory_domains op is set */ 5284 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5285 CU_ASSERT(rc == 1); 5286 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5287 5288 /* get_memory_domains op is not set */ 5289 fn_table.get_memory_domains = NULL; 5290 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5291 CU_ASSERT(rc == 0); 5292 } 5293 5294 static void 5295 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5296 { 5297 struct spdk_bdev *bdev; 5298 struct spdk_bdev_desc *desc = NULL; 5299 struct spdk_io_channel *io_ch; 5300 char io_buf[512]; 5301 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5302 struct ut_expected_io *expected_io; 5303 int rc; 5304 5305 spdk_bdev_initialize(bdev_init_cb, NULL); 5306 5307 bdev = allocate_bdev("bdev0"); 5308 bdev->md_interleave = false; 5309 bdev->md_len = 8; 5310 5311 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5312 CU_ASSERT(rc == 0); 5313 SPDK_CU_ASSERT_FATAL(desc != NULL); 5314 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5315 io_ch = spdk_bdev_get_io_channel(desc); 5316 CU_ASSERT(io_ch != NULL); 5317 5318 /* read */ 5319 g_io_done = false; 5320 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5321 if (ext_io_opts) { 5322 expected_io->md_buf = ext_io_opts->metadata; 5323 expected_io->ext_io_opts = ext_io_opts; 5324 } 5325 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5326 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io,
expected_io, link); 5327 5328 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5329 5330 CU_ASSERT(rc == 0); 5331 CU_ASSERT(g_io_done == false); 5332 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5333 stub_complete_io(1); 5334 CU_ASSERT(g_io_done == true); 5335 5336 /* write */ 5337 g_io_done = false; 5338 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5339 if (ext_io_opts) { 5340 expected_io->md_buf = ext_io_opts->metadata; 5341 expected_io->ext_io_opts = ext_io_opts; 5342 } 5343 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5344 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5345 5346 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5347 5348 CU_ASSERT(rc == 0); 5349 CU_ASSERT(g_io_done == false); 5350 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5351 stub_complete_io(1); 5352 CU_ASSERT(g_io_done == true); 5353 5354 spdk_put_io_channel(io_ch); 5355 spdk_bdev_close(desc); 5356 free_bdev(bdev); 5357 spdk_bdev_finish(bdev_fini_cb, NULL); 5358 poll_threads(); 5359 5360 } 5361 5362 static void 5363 bdev_io_ext(void) 5364 { 5365 struct spdk_bdev_ext_io_opts ext_io_opts = { 5366 .metadata = (void *)0xFF000000, 5367 .size = sizeof(ext_io_opts) 5368 }; 5369 5370 _bdev_io_ext(&ext_io_opts); 5371 } 5372 5373 static void 5374 bdev_io_ext_no_opts(void) 5375 { 5376 _bdev_io_ext(NULL); 5377 } 5378 5379 static void 5380 bdev_io_ext_invalid_opts(void) 5381 { 5382 struct spdk_bdev *bdev; 5383 struct spdk_bdev_desc *desc = NULL; 5384 struct spdk_io_channel *io_ch; 5385 char io_buf[512]; 5386 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5387 struct spdk_bdev_ext_io_opts ext_io_opts = { 5388 .metadata = (void *)0xFF000000, 5389 .size = sizeof(ext_io_opts) 5390 }; 5391 int rc; 5392 5393 spdk_bdev_initialize(bdev_init_cb, NULL); 5394 5395 bdev = allocate_bdev("bdev0"); 5396 bdev->md_interleave = false; 5397 bdev->md_len = 8; 5398 5399 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5400 CU_ASSERT(rc == 0); 5401 SPDK_CU_ASSERT_FATAL(desc != NULL); 5402 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5403 io_ch = spdk_bdev_get_io_channel(desc); 5404 CU_ASSERT(io_ch != NULL); 5405 5406 /* Test invalid ext_opts size */ 5407 ext_io_opts.size = 0; 5408 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5409 CU_ASSERT(rc == -EINVAL); 5410 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5411 CU_ASSERT(rc == -EINVAL); 5412 5413 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5414 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5415 CU_ASSERT(rc == -EINVAL); 5416 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5417 CU_ASSERT(rc == -EINVAL); 5418 5419 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5420 sizeof(ext_io_opts.metadata) - 1; 5421 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5422 CU_ASSERT(rc == -EINVAL); 5423 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5424 CU_ASSERT(rc == -EINVAL); 5425 5426 spdk_put_io_channel(io_ch); 5427 spdk_bdev_close(desc); 5428 free_bdev(bdev); 5429 spdk_bdev_finish(bdev_fini_cb, NULL); 5430 poll_threads(); 5431 } 5432 5433 static void 5434 bdev_io_ext_split(void) 5435 { 5436 struct 
spdk_bdev *bdev; 5437 struct spdk_bdev_desc *desc = NULL; 5438 struct spdk_io_channel *io_ch; 5439 char io_buf[512]; 5440 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5441 struct ut_expected_io *expected_io; 5442 struct spdk_bdev_ext_io_opts ext_io_opts = { 5443 .metadata = (void *)0xFF000000, 5444 .size = sizeof(ext_io_opts) 5445 }; 5446 int rc; 5447 5448 spdk_bdev_initialize(bdev_init_cb, NULL); 5449 5450 bdev = allocate_bdev("bdev0"); 5451 bdev->md_interleave = false; 5452 bdev->md_len = 8; 5453 5454 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5455 CU_ASSERT(rc == 0); 5456 SPDK_CU_ASSERT_FATAL(desc != NULL); 5457 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5458 io_ch = spdk_bdev_get_io_channel(desc); 5459 CU_ASSERT(io_ch != NULL); 5460 5461 /* Check that IO request with ext_opts and metadata is split correctly 5462 * Offset 14, length 8, payload 0xF000 5463 * Child - Offset 14, length 2, payload 0xF000 5464 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5465 */ 5466 bdev->optimal_io_boundary = 16; 5467 bdev->split_on_optimal_io_boundary = true; 5468 bdev->md_interleave = false; 5469 bdev->md_len = 8; 5470 5471 iov.iov_base = (void *)0xF000; 5472 iov.iov_len = 4096; 5473 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5474 ext_io_opts.metadata = (void *)0xFF000000; 5475 ext_io_opts.size = sizeof(ext_io_opts); 5476 g_io_done = false; 5477 5478 /* read */ 5479 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5480 expected_io->md_buf = ext_io_opts.metadata; 5481 expected_io->ext_io_opts = &ext_io_opts; 5482 expected_io->copy_opts = true; 5483 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5484 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5485 5486 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5487 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5488 expected_io->ext_io_opts = &ext_io_opts; 5489 expected_io->copy_opts = true; 5490 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5491 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5492 5493 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5494 CU_ASSERT(rc == 0); 5495 CU_ASSERT(g_io_done == false); 5496 5497 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5498 stub_complete_io(2); 5499 CU_ASSERT(g_io_done == true); 5500 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5501 5502 /* write */ 5503 g_io_done = false; 5504 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5505 expected_io->md_buf = ext_io_opts.metadata; 5506 expected_io->ext_io_opts = &ext_io_opts; 5507 expected_io->copy_opts = true; 5508 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5509 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5510 5511 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5512 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5513 expected_io->ext_io_opts = &ext_io_opts; 5514 expected_io->copy_opts = true; 5515 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5516 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5517 5518 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5519 CU_ASSERT(rc == 0); 5520 CU_ASSERT(g_io_done == false); 5521 5522 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5523 stub_complete_io(2); 5524 
CU_ASSERT(g_io_done == true); 5525 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5526 5527 spdk_put_io_channel(io_ch); 5528 spdk_bdev_close(desc); 5529 free_bdev(bdev); 5530 spdk_bdev_finish(bdev_fini_cb, NULL); 5531 poll_threads(); 5532 } 5533 5534 static void 5535 bdev_io_ext_bounce_buffer(void) 5536 { 5537 struct spdk_bdev *bdev; 5538 struct spdk_bdev_desc *desc = NULL; 5539 struct spdk_io_channel *io_ch; 5540 char io_buf[512]; 5541 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5542 struct ut_expected_io *expected_io; 5543 struct spdk_bdev_ext_io_opts ext_io_opts = { 5544 .metadata = (void *)0xFF000000, 5545 .size = sizeof(ext_io_opts) 5546 }; 5547 int rc; 5548 5549 spdk_bdev_initialize(bdev_init_cb, NULL); 5550 5551 bdev = allocate_bdev("bdev0"); 5552 bdev->md_interleave = false; 5553 bdev->md_len = 8; 5554 5555 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5556 CU_ASSERT(rc == 0); 5557 SPDK_CU_ASSERT_FATAL(desc != NULL); 5558 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5559 io_ch = spdk_bdev_get_io_channel(desc); 5560 CU_ASSERT(io_ch != NULL); 5561 5562 /* Verify data pull/push: 5563 * the bdev doesn't support memory domains, so buffers from the bdev memory pool will be used */ 5564 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 5565 5566 /* read */ 5567 g_io_done = false; 5568 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5569 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5570 expected_io->ext_io_opts = &ext_io_opts; 5571 expected_io->copy_opts = true; 5572 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5573 5574 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5575 5576 CU_ASSERT(rc == 0); 5577 CU_ASSERT(g_io_done == false); 5578 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5579 stub_complete_io(1); 5580 CU_ASSERT(g_memory_domain_push_data_called == true); 5581 CU_ASSERT(g_io_done == true); 5582 5583 /* write */ 5584 g_io_done = false; 5585 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5586 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5587 expected_io->ext_io_opts = &ext_io_opts; 5588 expected_io->copy_opts = true; 5589 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5590 5591 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5592 5593 CU_ASSERT(rc == 0); 5594 CU_ASSERT(g_memory_domain_pull_data_called == true); 5595 CU_ASSERT(g_io_done == false); 5596 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5597 stub_complete_io(1); 5598 CU_ASSERT(g_io_done == true); 5599 5600 spdk_put_io_channel(io_ch); 5601 spdk_bdev_close(desc); 5602 free_bdev(bdev); 5603 spdk_bdev_finish(bdev_fini_cb, NULL); 5604 poll_threads(); 5605 } 5606 5607 static void 5608 bdev_register_uuid_alias(void) 5609 { 5610 struct spdk_bdev *bdev, *second; 5611 char uuid[SPDK_UUID_STRING_LEN]; 5612 int rc; 5613 5614 spdk_bdev_initialize(bdev_init_cb, NULL); 5615 bdev = allocate_bdev("bdev0"); 5616 5617 /* Make sure a UUID was generated */ 5618 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))); 5619 5620 /* Check that a UUID alias was registered */ 5621 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5622 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5623 5624 /* Unregister the bdev */ 5625 spdk_bdev_unregister(bdev, NULL, NULL); 5626 poll_threads(); 5627
CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5628 5629 /* Check the same, but this time register the bdev with a non-zero UUID */ 5630 rc = spdk_bdev_register(bdev); 5631 CU_ASSERT_EQUAL(rc, 0); 5632 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5633 5634 /* Unregister the bdev */ 5635 spdk_bdev_unregister(bdev, NULL, NULL); 5636 poll_threads(); 5637 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5638 5639 /* Register the bdev using the UUID as the name */ 5640 bdev->name = uuid; 5641 rc = spdk_bdev_register(bdev); 5642 CU_ASSERT_EQUAL(rc, 0); 5643 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5644 5645 /* Unregister the bdev */ 5646 spdk_bdev_unregister(bdev, NULL, NULL); 5647 poll_threads(); 5648 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5649 5650 /* Check that it's not possible to register two bdevs with the same UUID */ 5651 bdev->name = "bdev0"; 5652 second = allocate_bdev("bdev1"); 5653 spdk_uuid_copy(&bdev->uuid, &second->uuid); 5654 rc = spdk_bdev_register(bdev); 5655 CU_ASSERT_EQUAL(rc, -EEXIST); 5656 5657 /* Regenerate the UUID and re-check */ 5658 spdk_uuid_generate(&bdev->uuid); 5659 rc = spdk_bdev_register(bdev); 5660 CU_ASSERT_EQUAL(rc, 0); 5661 5662 /* And check that both bdevs can be retrieved through their UUIDs */ 5663 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5664 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5665 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 5666 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 5667 5668 free_bdev(second); 5669 free_bdev(bdev); 5670 spdk_bdev_finish(bdev_fini_cb, NULL); 5671 poll_threads(); 5672 } 5673 5674 static void 5675 bdev_unregister_by_name(void) 5676 { 5677 struct spdk_bdev *bdev; 5678 int rc; 5679 5680 bdev = allocate_bdev("bdev"); 5681 5682 g_event_type1 = 0xFF; 5683 g_unregister_arg = NULL; 5684 g_unregister_rc = -1; 5685 5686 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5687 CU_ASSERT(rc == -ENODEV); 5688 5689 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5690 CU_ASSERT(rc == -ENODEV); 5691 5692 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5693 CU_ASSERT(rc == 0); 5694 5695 /* Check that the unregister callback is delayed */ 5696 CU_ASSERT(g_unregister_arg == NULL); 5697 CU_ASSERT(g_unregister_rc == -1); 5698 5699 poll_threads(); 5700 5701 /* The event callback shall not be issued because the device was closed */ 5702 CU_ASSERT(g_event_type1 == 0xFF); 5703 /* The unregister callback is issued */ 5704 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 5705 CU_ASSERT(g_unregister_rc == 0); 5706 5707 free_bdev(bdev); 5708 } 5709 5710 static int 5711 count_bdevs(void *ctx, struct spdk_bdev *bdev) 5712 { 5713 int *count = ctx; 5714 5715 (*count)++; 5716 5717 return 0; 5718 } 5719 5720 static void 5721 for_each_bdev_test(void) 5722 { 5723 struct spdk_bdev *bdev[8]; 5724 int rc, count; 5725 5726 bdev[0] = allocate_bdev("bdev0"); 5727 5728 bdev[1] = allocate_bdev("bdev1"); 5729 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 5730 CU_ASSERT(rc == 0); 5731 5732 bdev[2] = allocate_bdev("bdev2"); 5733 5734 bdev[3] = allocate_bdev("bdev3"); 5735 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 5736 CU_ASSERT(rc == 0); 5737 5738 bdev[4] = allocate_bdev("bdev4"); 5739 5740 bdev[5] = allocate_bdev("bdev5"); 5741 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 5742 CU_ASSERT(rc == 0); 5743 5744 bdev[6] =
allocate_bdev("bdev6"); 5745 5746 bdev[7] = allocate_bdev("bdev7"); 5747 5748 count = 0; 5749 rc = spdk_for_each_bdev(&count, count_bdevs); 5750 CU_ASSERT(rc == 0); 5751 CU_ASSERT(count == 8); 5752 5753 count = 0; 5754 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5755 CU_ASSERT(rc == 0); 5756 CU_ASSERT(count == 5); 5757 5758 free_bdev(bdev[0]); 5759 free_bdev(bdev[1]); 5760 free_bdev(bdev[2]); 5761 free_bdev(bdev[3]); 5762 free_bdev(bdev[4]); 5763 free_bdev(bdev[5]); 5764 free_bdev(bdev[6]); 5765 free_bdev(bdev[7]); 5766 } 5767 5768 int 5769 main(int argc, char **argv) 5770 { 5771 CU_pSuite suite = NULL; 5772 unsigned int num_failures; 5773 5774 CU_set_error_action(CUEA_ABORT); 5775 CU_initialize_registry(); 5776 5777 suite = CU_add_suite("bdev", null_init, null_clean); 5778 5779 CU_ADD_TEST(suite, bytes_to_blocks_test); 5780 CU_ADD_TEST(suite, num_blocks_test); 5781 CU_ADD_TEST(suite, io_valid_test); 5782 CU_ADD_TEST(suite, open_write_test); 5783 CU_ADD_TEST(suite, claim_test); 5784 CU_ADD_TEST(suite, alias_add_del_test); 5785 CU_ADD_TEST(suite, get_device_stat_test); 5786 CU_ADD_TEST(suite, bdev_io_types_test); 5787 CU_ADD_TEST(suite, bdev_io_wait_test); 5788 CU_ADD_TEST(suite, bdev_io_spans_split_test); 5789 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 5790 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 5791 CU_ADD_TEST(suite, bdev_io_mix_split_test); 5792 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 5793 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 5794 CU_ADD_TEST(suite, bdev_io_alignment); 5795 CU_ADD_TEST(suite, bdev_histograms); 5796 CU_ADD_TEST(suite, bdev_write_zeroes); 5797 CU_ADD_TEST(suite, bdev_compare_and_write); 5798 CU_ADD_TEST(suite, bdev_compare); 5799 CU_ADD_TEST(suite, bdev_compare_emulated); 5800 CU_ADD_TEST(suite, bdev_zcopy_write); 5801 CU_ADD_TEST(suite, bdev_zcopy_read); 5802 CU_ADD_TEST(suite, bdev_open_while_hotremove); 5803 CU_ADD_TEST(suite, bdev_close_while_hotremove); 5804 CU_ADD_TEST(suite, bdev_open_ext); 5805 CU_ADD_TEST(suite, bdev_open_ext_unregister); 5806 CU_ADD_TEST(suite, bdev_set_io_timeout); 5807 CU_ADD_TEST(suite, bdev_set_qd_sampling); 5808 CU_ADD_TEST(suite, lba_range_overlap); 5809 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 5810 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 5811 CU_ADD_TEST(suite, lock_lba_range_overlapped); 5812 CU_ADD_TEST(suite, bdev_io_abort); 5813 CU_ADD_TEST(suite, bdev_unmap); 5814 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 5815 CU_ADD_TEST(suite, bdev_set_options_test); 5816 CU_ADD_TEST(suite, bdev_multi_allocation); 5817 CU_ADD_TEST(suite, bdev_get_memory_domains); 5818 CU_ADD_TEST(suite, bdev_io_ext); 5819 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 5820 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 5821 CU_ADD_TEST(suite, bdev_io_ext_split); 5822 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 5823 CU_ADD_TEST(suite, bdev_register_uuid_alias); 5824 CU_ADD_TEST(suite, bdev_unregister_by_name); 5825 CU_ADD_TEST(suite, for_each_bdev_test); 5826 5827 allocate_cores(1); 5828 allocate_threads(1); 5829 set_thread(0); 5830 5831 CU_basic_set_mode(CU_BRM_VERBOSE); 5832 CU_basic_run_tests(); 5833 num_failures = CU_get_number_of_failures(); 5834 CU_cleanup_registry(); 5835 5836 free_threads(); 5837 free_cores(); 5838 5839 return num_failures; 5840 } 5841