1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) Intel Corporation. All rights reserved. 3 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved. 4 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk_cunit.h" 8 9 #include "common/lib/ut_multithread.c" 10 #include "unit/lib/json_mock.c" 11 12 #include "spdk/config.h" 13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */ 14 #undef SPDK_CONFIG_VTUNE 15 16 #include "bdev/bdev.c" 17 18 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0); 19 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL); 20 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain), 21 "test_domain"); 22 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type, 23 (struct spdk_memory_domain *domain), 0); 24 25 static bool g_memory_domain_pull_data_called; 26 static bool g_memory_domain_push_data_called; 27 28 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int); 29 int 30 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx, 31 struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt, 32 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 33 { 34 g_memory_domain_pull_data_called = true; 35 HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data); 36 cpl_cb(cpl_cb_arg, 0); 37 return 0; 38 } 39 40 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int); 41 int 42 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 43 struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt, 44 spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg) 45 { 46 g_memory_domain_push_data_called = true; 47 HANDLE_RETURN_MOCK(spdk_memory_domain_push_data); 48 cpl_cb(cpl_cb_arg, 0); 49 return 0; 50 } 51 52 int g_status; 53 int g_count; 54 enum spdk_bdev_event_type g_event_type1; 55 enum spdk_bdev_event_type g_event_type2; 56 enum spdk_bdev_event_type g_event_type3; 57 enum spdk_bdev_event_type g_event_type4; 58 struct spdk_histogram_data *g_histogram; 59 void *g_unregister_arg; 60 int g_unregister_rc; 61 62 void 63 spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io, 64 int *sc, int *sk, int *asc, int *ascq) 65 { 66 } 67 68 static int 69 null_init(void) 70 { 71 return 0; 72 } 73 74 static int 75 null_clean(void) 76 { 77 return 0; 78 } 79 80 static int 81 stub_destruct(void *ctx) 82 { 83 return 0; 84 } 85 86 struct ut_expected_io { 87 uint8_t type; 88 uint64_t offset; 89 uint64_t length; 90 int iovcnt; 91 struct iovec iov[BDEV_IO_NUM_CHILD_IOV]; 92 void *md_buf; 93 struct spdk_bdev_ext_io_opts *ext_io_opts; 94 bool copy_opts; 95 TAILQ_ENTRY(ut_expected_io) link; 96 }; 97 98 struct bdev_ut_channel { 99 TAILQ_HEAD(, spdk_bdev_io) outstanding_io; 100 uint32_t outstanding_io_count; 101 TAILQ_HEAD(, ut_expected_io) expected_io; 102 }; 103 104 static bool g_io_done; 105 static struct spdk_bdev_io *g_bdev_io; 106 static enum spdk_bdev_io_status g_io_status; 107 static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 108 static uint32_t g_bdev_ut_io_device; 109 static struct bdev_ut_channel *g_bdev_ut_channel; 110 static void *g_compare_read_buf; 111 static uint32_t g_compare_read_buf_len; 112 static void *g_compare_write_buf; 113 static uint32_t g_compare_write_buf_len; 114 static void *g_compare_md_buf; 115 static bool g_abort_done; 116 static enum spdk_bdev_io_status g_abort_status; 117 static void *g_zcopy_read_buf; 118 static uint32_t g_zcopy_read_buf_len; 119 static void *g_zcopy_write_buf; 120 static uint32_t g_zcopy_write_buf_len; 121 static struct spdk_bdev_io *g_zcopy_bdev_io; 122 static uint64_t g_seek_data_offset; 123 static uint64_t g_seek_hole_offset; 124 static uint64_t g_seek_offset; 125 126 static struct ut_expected_io * 127 ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt) 128 { 129 struct ut_expected_io *expected_io; 130 131 expected_io = calloc(1, sizeof(*expected_io)); 132 SPDK_CU_ASSERT_FATAL(expected_io != NULL); 133 134 expected_io->type = type; 135 expected_io->offset = offset; 136 expected_io->length = length; 137 expected_io->iovcnt = iovcnt; 138 139 return expected_io; 140 } 141 142 static void 143 ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len) 144 { 145 expected_io->iov[pos].iov_base = base; 146 expected_io->iov[pos].iov_len = len; 147 } 148 149 static void 150 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 151 { 152 struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch); 153 struct ut_expected_io *expected_io; 154 struct iovec *iov, *expected_iov; 155 struct spdk_bdev_io *bio_to_abort; 156 int i; 157 158 g_bdev_io = bdev_io; 159 160 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 161 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 162 163 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 164 CU_ASSERT(g_compare_read_buf_len == len); 165 memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len); 166 if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) { 167 memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf, 168 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks); 169 } 170 } 171 172 if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 173 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 174 175 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 176 CU_ASSERT(g_compare_write_buf_len == len); 177 memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len); 178 } 179 180 if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) { 181 uint32_t len = bdev_io->u.bdev.iovs[0].iov_len; 182 183 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 184 CU_ASSERT(g_compare_read_buf_len == len); 185 if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) { 186 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 187 } 188 if (bdev_io->u.bdev.md_buf && 189 memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf, 190 bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) { 191 g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE; 192 } 193 } 194 195 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) { 196 if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) { 197 TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) { 198 if (bio_to_abort == bdev_io->u.abort.bio_to_abort) { 199 TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link); 200 ch->outstanding_io_count--; 201 spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED); 202 break; 203 } 204 } 205 } 206 } 207 208 if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) { 209 if (bdev_io->u.bdev.zcopy.start) { 210 g_zcopy_bdev_io = bdev_io; 211 if (bdev_io->u.bdev.zcopy.populate) { 212 /* Start of a read */ 213 CU_ASSERT(g_zcopy_read_buf != NULL); 214 CU_ASSERT(g_zcopy_read_buf_len > 0); 215 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf; 216 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len; 217 bdev_io->u.bdev.iovcnt = 1; 218 } else { 219 /* Start of a write */ 220 CU_ASSERT(g_zcopy_write_buf != NULL); 221 CU_ASSERT(g_zcopy_write_buf_len > 0); 222 bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf; 223 bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len; 224 bdev_io->u.bdev.iovcnt = 1; 225 } 226 } else { 227 if (bdev_io->u.bdev.zcopy.commit) { 228 /* End of write */ 229 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf); 230 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len); 231 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 232 g_zcopy_write_buf = NULL; 233 g_zcopy_write_buf_len = 0; 234 } else { 235 /* End of read */ 236 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf); 237 CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len); 238 CU_ASSERT(bdev_io->u.bdev.iovcnt == 1); 239 g_zcopy_read_buf = NULL; 240 g_zcopy_read_buf_len = 0; 241 } 242 } 243 } 244 245 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) { 246 bdev_io->u.bdev.seek.offset = g_seek_data_offset; 247 } 248 249 if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) { 250 bdev_io->u.bdev.seek.offset = g_seek_hole_offset; 251 } 252 253 TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link); 254 ch->outstanding_io_count++; 255 256 expected_io = TAILQ_FIRST(&ch->expected_io); 257 if (expected_io == NULL) { 258 return; 259 } 260 TAILQ_REMOVE(&ch->expected_io, expected_io, link); 261 262 if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) { 263 CU_ASSERT(bdev_io->type == expected_io->type); 264 } 265 266 if (expected_io->md_buf != NULL) { 267 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf); 268 if (bdev_io->u.bdev.ext_opts) { 269 CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata); 270 } 271 } 272 273 if (expected_io->copy_opts) { 274 if (expected_io->ext_io_opts) { 275 /* opts are not NULL so it should have been copied */ 276 CU_ASSERT(expected_io->ext_io_opts != bdev_io->u.bdev.ext_opts); 277 CU_ASSERT(bdev_io->u.bdev.ext_opts == &bdev_io->internal.ext_opts_copy); 278 /* internal opts always points to opts passed */ 279 CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts); 280 } else { 281 /* passed opts was NULL so we expect bdev_io opts to be NULL */ 282 CU_ASSERT(bdev_io->u.bdev.ext_opts == NULL); 283 } 284 } else { 285 /* opts were not copied so they should be equal */ 286 CU_ASSERT(expected_io->ext_io_opts == bdev_io->u.bdev.ext_opts); 287 } 288 289 if (expected_io->length == 0) { 290 free(expected_io); 291 return; 292 } 293 294 CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks); 295 CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks); 296 297 if (expected_io->iovcnt == 0) { 298 free(expected_io); 299 /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */ 300 return; 301 } 302 303 CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt); 304 for (i = 0; i < expected_io->iovcnt; i++) { 305 expected_iov = &expected_io->iov[i]; 306 if (bdev_io->internal.orig_iovcnt == 0) { 307 iov = &bdev_io->u.bdev.iovs[i]; 308 } else { 309 iov = bdev_io->internal.orig_iovs; 310 } 311 CU_ASSERT(iov->iov_len == expected_iov->iov_len); 312 CU_ASSERT(iov->iov_base == expected_iov->iov_base); 313 } 314 315 free(expected_io); 316 } 317 318 static void 319 stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch, 320 struct spdk_bdev_io *bdev_io, bool success) 321 { 322 CU_ASSERT(success == true); 323 324 stub_submit_request(_ch, bdev_io); 325 } 326 327 static void 328 stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io) 329 { 330 spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb, 331 bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen); 332 } 333 334 static uint32_t 335 stub_complete_io(uint32_t num_to_complete) 336 { 337 struct bdev_ut_channel *ch = g_bdev_ut_channel; 338 struct spdk_bdev_io *bdev_io; 339 static enum spdk_bdev_io_status io_status; 340 uint32_t num_completed = 0; 341 342 while (num_completed < num_to_complete) { 343 if (TAILQ_EMPTY(&ch->outstanding_io)) { 344 break; 345 } 346 bdev_io = TAILQ_FIRST(&ch->outstanding_io); 347 TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link); 348 ch->outstanding_io_count--; 349 io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS : 350 g_io_exp_status; 351 spdk_bdev_io_complete(bdev_io, io_status); 352 num_completed++; 353 } 354 355 return num_completed; 356 } 357 358 static struct spdk_io_channel * 359 bdev_ut_get_io_channel(void *ctx) 360 { 361 return spdk_get_io_channel(&g_bdev_ut_io_device); 362 } 363 364 static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = { 365 [SPDK_BDEV_IO_TYPE_READ] = true, 366 [SPDK_BDEV_IO_TYPE_WRITE] = true, 367 [SPDK_BDEV_IO_TYPE_COMPARE] = true, 368 [SPDK_BDEV_IO_TYPE_UNMAP] = true, 369 [SPDK_BDEV_IO_TYPE_FLUSH] = true, 370 [SPDK_BDEV_IO_TYPE_RESET] = true, 371 [SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true, 372 [SPDK_BDEV_IO_TYPE_NVME_IO] = true, 373 [SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true, 374 [SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true, 375 [SPDK_BDEV_IO_TYPE_ZCOPY] = true, 376 [SPDK_BDEV_IO_TYPE_ABORT] = true, 377 [SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true, 378 [SPDK_BDEV_IO_TYPE_SEEK_DATA] = true, 379 }; 380 381 static void 382 ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable) 383 { 384 g_io_types_supported[io_type] = enable; 385 } 386 387 static bool 388 stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type) 389 { 390 return g_io_types_supported[io_type]; 391 } 392 393 static struct spdk_bdev_fn_table fn_table = { 394 .destruct = stub_destruct, 395 .submit_request = stub_submit_request, 396 .get_io_channel = bdev_ut_get_io_channel, 397 .io_type_supported = stub_io_type_supported, 398 }; 399 400 static int 401 bdev_ut_create_ch(void *io_device, void *ctx_buf) 402 { 403 struct bdev_ut_channel *ch = ctx_buf; 404 405 CU_ASSERT(g_bdev_ut_channel == NULL); 406 g_bdev_ut_channel = ch; 407 408 TAILQ_INIT(&ch->outstanding_io); 409 ch->outstanding_io_count = 0; 410 TAILQ_INIT(&ch->expected_io); 411 return 0; 412 } 413 414 static void 415 bdev_ut_destroy_ch(void *io_device, void *ctx_buf) 416 { 417 CU_ASSERT(g_bdev_ut_channel != NULL); 418 g_bdev_ut_channel = NULL; 419 } 420 421 struct spdk_bdev_module bdev_ut_if; 422 423 static int 424 bdev_ut_module_init(void) 425 { 426 spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch, 427 sizeof(struct bdev_ut_channel), NULL); 428 spdk_bdev_module_init_done(&bdev_ut_if); 429 return 0; 430 } 431 432 static void 433 bdev_ut_module_fini(void) 434 { 435 spdk_io_device_unregister(&g_bdev_ut_io_device, NULL); 436 } 437 438 struct spdk_bdev_module bdev_ut_if = { 439 .name = "bdev_ut", 440 .module_init = bdev_ut_module_init, 441 .module_fini = bdev_ut_module_fini, 442 .async_init = true, 443 }; 444 445 static void vbdev_ut_examine(struct spdk_bdev *bdev); 446 447 static int 448 vbdev_ut_module_init(void) 449 { 450 return 0; 451 } 452 453 static void 454 vbdev_ut_module_fini(void) 455 { 456 } 457 458 struct spdk_bdev_module vbdev_ut_if = { 459 .name = "vbdev_ut", 460 .module_init = vbdev_ut_module_init, 461 .module_fini = vbdev_ut_module_fini, 462 .examine_config = vbdev_ut_examine, 463 }; 464 465 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if) 466 SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if) 467 468 static void 469 vbdev_ut_examine(struct spdk_bdev *bdev) 470 { 471 spdk_bdev_module_examine_done(&vbdev_ut_if); 472 } 473 474 static struct spdk_bdev * 475 allocate_bdev(char *name) 476 { 477 struct spdk_bdev *bdev; 478 int rc; 479 480 bdev = calloc(1, sizeof(*bdev)); 481 SPDK_CU_ASSERT_FATAL(bdev != NULL); 482 483 bdev->name = name; 484 bdev->fn_table = &fn_table; 485 bdev->module = &bdev_ut_if; 486 bdev->blockcnt = 1024; 487 bdev->blocklen = 512; 488 489 rc = spdk_bdev_register(bdev); 490 poll_threads(); 491 CU_ASSERT(rc == 0); 492 493 return bdev; 494 } 495 496 static struct spdk_bdev * 497 allocate_vbdev(char *name) 498 { 499 struct spdk_bdev *bdev; 500 int rc; 501 502 bdev = calloc(1, sizeof(*bdev)); 503 SPDK_CU_ASSERT_FATAL(bdev != NULL); 504 505 bdev->name = name; 506 bdev->fn_table = &fn_table; 507 bdev->module = &vbdev_ut_if; 508 509 rc = spdk_bdev_register(bdev); 510 poll_threads(); 511 CU_ASSERT(rc == 0); 512 513 return bdev; 514 } 515 516 static void 517 free_bdev(struct spdk_bdev *bdev) 518 { 519 spdk_bdev_unregister(bdev, NULL, NULL); 520 poll_threads(); 521 memset(bdev, 0xFF, sizeof(*bdev)); 522 free(bdev); 523 } 524 525 static void 526 free_vbdev(struct spdk_bdev *bdev) 527 { 528 spdk_bdev_unregister(bdev, NULL, NULL); 529 poll_threads(); 530 memset(bdev, 0xFF, sizeof(*bdev)); 531 free(bdev); 532 } 533 534 static void 535 get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc) 536 { 537 const char *bdev_name; 538 539 CU_ASSERT(bdev != NULL); 540 CU_ASSERT(rc == 0); 541 bdev_name = spdk_bdev_get_name(bdev); 542 CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0"); 543 544 free(stat); 545 546 *(bool *)cb_arg = true; 547 } 548 549 static void 550 bdev_unregister_cb(void *cb_arg, int rc) 551 { 552 g_unregister_arg = cb_arg; 553 g_unregister_rc = rc; 554 } 555 556 static void 557 bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 558 { 559 } 560 561 static void 562 bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 563 { 564 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 565 566 g_event_type1 = type; 567 if (SPDK_BDEV_EVENT_REMOVE == type) { 568 spdk_bdev_close(desc); 569 } 570 } 571 572 static void 573 bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 574 { 575 struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx; 576 577 g_event_type2 = type; 578 if (SPDK_BDEV_EVENT_REMOVE == type) { 579 spdk_bdev_close(desc); 580 } 581 } 582 583 static void 584 bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 585 { 586 g_event_type3 = type; 587 } 588 589 static void 590 bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx) 591 { 592 g_event_type4 = type; 593 } 594 595 static void 596 bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 597 { 598 g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io); 599 spdk_bdev_free_io(bdev_io); 600 } 601 602 static void 603 get_device_stat_test(void) 604 { 605 struct spdk_bdev *bdev; 606 struct spdk_bdev_io_stat *stat; 607 bool done; 608 609 bdev = allocate_bdev("bdev0"); 610 stat = calloc(1, sizeof(struct spdk_bdev_io_stat)); 611 if (stat == NULL) { 612 free_bdev(bdev); 613 return; 614 } 615 616 done = false; 617 spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done); 618 while (!done) { poll_threads(); } 619 620 free_bdev(bdev); 621 } 622 623 static void 624 open_write_test(void) 625 { 626 struct spdk_bdev *bdev[9]; 627 struct spdk_bdev_desc *desc[9] = {}; 628 int rc; 629 630 /* 631 * Create a tree of bdevs to test various open w/ write cases. 632 * 633 * bdev0 through bdev3 are physical block devices, such as NVMe 634 * namespaces or Ceph block devices. 635 * 636 * bdev4 is a virtual bdev with multiple base bdevs. This models 637 * caching or RAID use cases. 638 * 639 * bdev5 through bdev7 are all virtual bdevs with the same base 640 * bdev (except bdev7). This models partitioning or logical volume 641 * use cases. 642 * 643 * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs 644 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This 645 * models caching, RAID, partitioning or logical volumes use cases. 646 * 647 * bdev8 is a virtual bdev with multiple base bdevs, but these 648 * base bdevs are themselves virtual bdevs. 649 * 650 * bdev8 651 * | 652 * +----------+ 653 * | | 654 * bdev4 bdev5 bdev6 bdev7 655 * | | | | 656 * +---+---+ +---+ + +---+---+ 657 * | | \ | / \ 658 * bdev0 bdev1 bdev2 bdev3 659 */ 660 661 bdev[0] = allocate_bdev("bdev0"); 662 rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if); 663 CU_ASSERT(rc == 0); 664 665 bdev[1] = allocate_bdev("bdev1"); 666 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 667 CU_ASSERT(rc == 0); 668 669 bdev[2] = allocate_bdev("bdev2"); 670 rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if); 671 CU_ASSERT(rc == 0); 672 673 bdev[3] = allocate_bdev("bdev3"); 674 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 675 CU_ASSERT(rc == 0); 676 677 bdev[4] = allocate_vbdev("bdev4"); 678 rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if); 679 CU_ASSERT(rc == 0); 680 681 bdev[5] = allocate_vbdev("bdev5"); 682 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 683 CU_ASSERT(rc == 0); 684 685 bdev[6] = allocate_vbdev("bdev6"); 686 687 bdev[7] = allocate_vbdev("bdev7"); 688 689 bdev[8] = allocate_vbdev("bdev8"); 690 691 /* Open bdev0 read-only. This should succeed. */ 692 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]); 693 CU_ASSERT(rc == 0); 694 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 695 CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0])); 696 spdk_bdev_close(desc[0]); 697 698 /* 699 * Open bdev1 read/write. This should fail since bdev1 has been claimed 700 * by a vbdev module. 701 */ 702 rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]); 703 CU_ASSERT(rc == -EPERM); 704 705 /* 706 * Open bdev4 read/write. This should fail since bdev3 has been claimed 707 * by a vbdev module. 708 */ 709 rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]); 710 CU_ASSERT(rc == -EPERM); 711 712 /* Open bdev4 read-only. This should succeed. */ 713 rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]); 714 CU_ASSERT(rc == 0); 715 SPDK_CU_ASSERT_FATAL(desc[4] != NULL); 716 CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4])); 717 spdk_bdev_close(desc[4]); 718 719 /* 720 * Open bdev8 read/write. This should succeed since it is a leaf 721 * bdev. 722 */ 723 rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]); 724 CU_ASSERT(rc == 0); 725 SPDK_CU_ASSERT_FATAL(desc[8] != NULL); 726 CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8])); 727 spdk_bdev_close(desc[8]); 728 729 /* 730 * Open bdev5 read/write. This should fail since bdev4 has been claimed 731 * by a vbdev module. 732 */ 733 rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]); 734 CU_ASSERT(rc == -EPERM); 735 736 /* Open bdev4 read-only. This should succeed. */ 737 rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]); 738 CU_ASSERT(rc == 0); 739 SPDK_CU_ASSERT_FATAL(desc[5] != NULL); 740 CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5])); 741 spdk_bdev_close(desc[5]); 742 743 free_vbdev(bdev[8]); 744 745 free_vbdev(bdev[5]); 746 free_vbdev(bdev[6]); 747 free_vbdev(bdev[7]); 748 749 free_vbdev(bdev[4]); 750 751 free_bdev(bdev[0]); 752 free_bdev(bdev[1]); 753 free_bdev(bdev[2]); 754 free_bdev(bdev[3]); 755 } 756 757 static void 758 claim_test(void) 759 { 760 struct spdk_bdev *bdev; 761 struct spdk_bdev_desc *desc, *open_desc; 762 int rc; 763 uint32_t count; 764 765 /* 766 * A vbdev that uses a read-only bdev may need it to remain read-only. 767 * To do so, it opens the bdev read-only, then claims it without 768 * passing a spdk_bdev_desc. 769 */ 770 bdev = allocate_bdev("bdev0"); 771 rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc); 772 CU_ASSERT(rc == 0); 773 CU_ASSERT(desc->write == false); 774 775 rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if); 776 CU_ASSERT(rc == 0); 777 CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if); 778 779 /* There should be only one open descriptor and it should still be ro */ 780 count = 0; 781 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 782 CU_ASSERT(open_desc == desc); 783 CU_ASSERT(!open_desc->write); 784 count++; 785 } 786 CU_ASSERT(count == 1); 787 788 /* A read-only bdev is upgraded to read-write if desc is passed. */ 789 spdk_bdev_module_release_bdev(bdev); 790 rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if); 791 CU_ASSERT(rc == 0); 792 CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if); 793 794 /* There should be only one open descriptor and it should be rw */ 795 count = 0; 796 TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) { 797 CU_ASSERT(open_desc == desc); 798 CU_ASSERT(open_desc->write); 799 count++; 800 } 801 CU_ASSERT(count == 1); 802 803 spdk_bdev_close(desc); 804 free_bdev(bdev); 805 } 806 807 static void 808 bytes_to_blocks_test(void) 809 { 810 struct spdk_bdev bdev; 811 uint64_t offset_blocks, num_blocks; 812 813 memset(&bdev, 0, sizeof(bdev)); 814 815 bdev.blocklen = 512; 816 817 /* All parameters valid */ 818 offset_blocks = 0; 819 num_blocks = 0; 820 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0); 821 CU_ASSERT(offset_blocks == 1); 822 CU_ASSERT(num_blocks == 2); 823 824 /* Offset not a block multiple */ 825 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0); 826 827 /* Length not a block multiple */ 828 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0); 829 830 /* In case blocklen not the power of two */ 831 bdev.blocklen = 100; 832 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0); 833 CU_ASSERT(offset_blocks == 1); 834 CU_ASSERT(num_blocks == 2); 835 836 /* Offset not a block multiple */ 837 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0); 838 839 /* Length not a block multiple */ 840 CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0); 841 } 842 843 static void 844 num_blocks_test(void) 845 { 846 struct spdk_bdev bdev; 847 struct spdk_bdev_desc *desc = NULL; 848 int rc; 849 850 memset(&bdev, 0, sizeof(bdev)); 851 bdev.name = "num_blocks"; 852 bdev.fn_table = &fn_table; 853 bdev.module = &bdev_ut_if; 854 spdk_bdev_register(&bdev); 855 poll_threads(); 856 spdk_bdev_notify_blockcnt_change(&bdev, 50); 857 858 /* Growing block number */ 859 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0); 860 /* Shrinking block number */ 861 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0); 862 863 rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc); 864 CU_ASSERT(rc == 0); 865 SPDK_CU_ASSERT_FATAL(desc != NULL); 866 CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc)); 867 868 /* Growing block number */ 869 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0); 870 /* Shrinking block number */ 871 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0); 872 873 g_event_type1 = 0xFF; 874 /* Growing block number */ 875 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0); 876 877 poll_threads(); 878 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE); 879 880 g_event_type1 = 0xFF; 881 /* Growing block number and closing */ 882 CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0); 883 884 spdk_bdev_close(desc); 885 spdk_bdev_unregister(&bdev, NULL, NULL); 886 887 poll_threads(); 888 889 /* Callback is not called for closed device */ 890 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 891 } 892 893 static void 894 io_valid_test(void) 895 { 896 struct spdk_bdev bdev; 897 898 memset(&bdev, 0, sizeof(bdev)); 899 900 bdev.blocklen = 512; 901 CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0); 902 903 spdk_bdev_notify_blockcnt_change(&bdev, 100); 904 905 /* All parameters valid */ 906 CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true); 907 908 /* Last valid block */ 909 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true); 910 911 /* Offset past end of bdev */ 912 CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false); 913 914 /* Offset + length past end of bdev */ 915 CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false); 916 917 /* Offset near end of uint64_t range (2^64 - 1) */ 918 CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false); 919 920 CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0); 921 } 922 923 static void 924 alias_add_del_test(void) 925 { 926 struct spdk_bdev *bdev[3]; 927 int rc; 928 929 /* Creating and registering bdevs */ 930 bdev[0] = allocate_bdev("bdev0"); 931 SPDK_CU_ASSERT_FATAL(bdev[0] != 0); 932 933 bdev[1] = allocate_bdev("bdev1"); 934 SPDK_CU_ASSERT_FATAL(bdev[1] != 0); 935 936 bdev[2] = allocate_bdev("bdev2"); 937 SPDK_CU_ASSERT_FATAL(bdev[2] != 0); 938 939 poll_threads(); 940 941 /* 942 * Trying adding an alias identical to name. 943 * Alias is identical to name, so it can not be added to aliases list 944 */ 945 rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name); 946 CU_ASSERT(rc == -EEXIST); 947 948 /* 949 * Trying to add empty alias, 950 * this one should fail 951 */ 952 rc = spdk_bdev_alias_add(bdev[0], NULL); 953 CU_ASSERT(rc == -EINVAL); 954 955 /* Trying adding same alias to two different registered bdevs */ 956 957 /* Alias is used first time, so this one should pass */ 958 rc = spdk_bdev_alias_add(bdev[0], "proper alias 0"); 959 CU_ASSERT(rc == 0); 960 961 /* Alias was added to another bdev, so this one should fail */ 962 rc = spdk_bdev_alias_add(bdev[1], "proper alias 0"); 963 CU_ASSERT(rc == -EEXIST); 964 965 /* Alias is used first time, so this one should pass */ 966 rc = spdk_bdev_alias_add(bdev[1], "proper alias 1"); 967 CU_ASSERT(rc == 0); 968 969 /* Trying removing an alias from registered bdevs */ 970 971 /* Alias is not on a bdev aliases list, so this one should fail */ 972 rc = spdk_bdev_alias_del(bdev[0], "not existing"); 973 CU_ASSERT(rc == -ENOENT); 974 975 /* Alias is present on a bdev aliases list, so this one should pass */ 976 rc = spdk_bdev_alias_del(bdev[0], "proper alias 0"); 977 CU_ASSERT(rc == 0); 978 979 /* Alias is present on a bdev aliases list, so this one should pass */ 980 rc = spdk_bdev_alias_del(bdev[1], "proper alias 1"); 981 CU_ASSERT(rc == 0); 982 983 /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */ 984 rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name); 985 CU_ASSERT(rc != 0); 986 987 /* Trying to del all alias from empty alias list */ 988 spdk_bdev_alias_del_all(bdev[2]); 989 SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases)); 990 991 /* Trying to del all alias from non-empty alias list */ 992 rc = spdk_bdev_alias_add(bdev[2], "alias0"); 993 CU_ASSERT(rc == 0); 994 rc = spdk_bdev_alias_add(bdev[2], "alias1"); 995 CU_ASSERT(rc == 0); 996 spdk_bdev_alias_del_all(bdev[2]); 997 CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases)); 998 999 /* Unregister and free bdevs */ 1000 spdk_bdev_unregister(bdev[0], NULL, NULL); 1001 spdk_bdev_unregister(bdev[1], NULL, NULL); 1002 spdk_bdev_unregister(bdev[2], NULL, NULL); 1003 1004 poll_threads(); 1005 1006 free(bdev[0]); 1007 free(bdev[1]); 1008 free(bdev[2]); 1009 } 1010 1011 static void 1012 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 1013 { 1014 g_io_done = true; 1015 g_io_status = bdev_io->internal.status; 1016 if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) && 1017 (bdev_io->u.bdev.zcopy.start)) { 1018 g_zcopy_bdev_io = bdev_io; 1019 } else { 1020 spdk_bdev_free_io(bdev_io); 1021 g_zcopy_bdev_io = NULL; 1022 } 1023 } 1024 1025 static void 1026 bdev_init_cb(void *arg, int rc) 1027 { 1028 CU_ASSERT(rc == 0); 1029 } 1030 1031 static void 1032 bdev_fini_cb(void *arg) 1033 { 1034 } 1035 1036 struct bdev_ut_io_wait_entry { 1037 struct spdk_bdev_io_wait_entry entry; 1038 struct spdk_io_channel *io_ch; 1039 struct spdk_bdev_desc *desc; 1040 bool submitted; 1041 }; 1042 1043 static void 1044 io_wait_cb(void *arg) 1045 { 1046 struct bdev_ut_io_wait_entry *entry = arg; 1047 int rc; 1048 1049 rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL); 1050 CU_ASSERT(rc == 0); 1051 entry->submitted = true; 1052 } 1053 1054 static void 1055 bdev_io_types_test(void) 1056 { 1057 struct spdk_bdev *bdev; 1058 struct spdk_bdev_desc *desc = NULL; 1059 struct spdk_io_channel *io_ch; 1060 struct spdk_bdev_opts bdev_opts = {}; 1061 int rc; 1062 1063 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1064 bdev_opts.bdev_io_pool_size = 4; 1065 bdev_opts.bdev_io_cache_size = 2; 1066 1067 rc = spdk_bdev_set_opts(&bdev_opts); 1068 CU_ASSERT(rc == 0); 1069 spdk_bdev_initialize(bdev_init_cb, NULL); 1070 poll_threads(); 1071 1072 bdev = allocate_bdev("bdev0"); 1073 1074 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1075 CU_ASSERT(rc == 0); 1076 poll_threads(); 1077 SPDK_CU_ASSERT_FATAL(desc != NULL); 1078 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1079 io_ch = spdk_bdev_get_io_channel(desc); 1080 CU_ASSERT(io_ch != NULL); 1081 1082 /* WRITE and WRITE ZEROES are not supported */ 1083 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 1084 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false); 1085 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL); 1086 CU_ASSERT(rc == -ENOTSUP); 1087 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 1088 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true); 1089 1090 /* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */ 1091 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false); 1092 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false); 1093 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false); 1094 rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1095 CU_ASSERT(rc == -ENOTSUP); 1096 rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL); 1097 CU_ASSERT(rc == -ENOTSUP); 1098 rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL); 1099 CU_ASSERT(rc == -ENOTSUP); 1100 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true); 1101 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true); 1102 ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true); 1103 1104 spdk_put_io_channel(io_ch); 1105 spdk_bdev_close(desc); 1106 free_bdev(bdev); 1107 spdk_bdev_finish(bdev_fini_cb, NULL); 1108 poll_threads(); 1109 } 1110 1111 static void 1112 bdev_io_wait_test(void) 1113 { 1114 struct spdk_bdev *bdev; 1115 struct spdk_bdev_desc *desc = NULL; 1116 struct spdk_io_channel *io_ch; 1117 struct spdk_bdev_opts bdev_opts = {}; 1118 struct bdev_ut_io_wait_entry io_wait_entry; 1119 struct bdev_ut_io_wait_entry io_wait_entry2; 1120 int rc; 1121 1122 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1123 bdev_opts.bdev_io_pool_size = 4; 1124 bdev_opts.bdev_io_cache_size = 2; 1125 1126 rc = spdk_bdev_set_opts(&bdev_opts); 1127 CU_ASSERT(rc == 0); 1128 spdk_bdev_initialize(bdev_init_cb, NULL); 1129 poll_threads(); 1130 1131 bdev = allocate_bdev("bdev0"); 1132 1133 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1134 CU_ASSERT(rc == 0); 1135 poll_threads(); 1136 SPDK_CU_ASSERT_FATAL(desc != NULL); 1137 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1138 io_ch = spdk_bdev_get_io_channel(desc); 1139 CU_ASSERT(io_ch != NULL); 1140 1141 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1142 CU_ASSERT(rc == 0); 1143 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1144 CU_ASSERT(rc == 0); 1145 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1146 CU_ASSERT(rc == 0); 1147 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1148 CU_ASSERT(rc == 0); 1149 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1150 1151 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 1152 CU_ASSERT(rc == -ENOMEM); 1153 1154 io_wait_entry.entry.bdev = bdev; 1155 io_wait_entry.entry.cb_fn = io_wait_cb; 1156 io_wait_entry.entry.cb_arg = &io_wait_entry; 1157 io_wait_entry.io_ch = io_ch; 1158 io_wait_entry.desc = desc; 1159 io_wait_entry.submitted = false; 1160 /* Cannot use the same io_wait_entry for two different calls. */ 1161 memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry)); 1162 io_wait_entry2.entry.cb_arg = &io_wait_entry2; 1163 1164 /* Queue two I/O waits. */ 1165 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry); 1166 CU_ASSERT(rc == 0); 1167 CU_ASSERT(io_wait_entry.submitted == false); 1168 rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry); 1169 CU_ASSERT(rc == 0); 1170 CU_ASSERT(io_wait_entry2.submitted == false); 1171 1172 stub_complete_io(1); 1173 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1174 CU_ASSERT(io_wait_entry.submitted == true); 1175 CU_ASSERT(io_wait_entry2.submitted == false); 1176 1177 stub_complete_io(1); 1178 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 1179 CU_ASSERT(io_wait_entry2.submitted == true); 1180 1181 stub_complete_io(4); 1182 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1183 1184 spdk_put_io_channel(io_ch); 1185 spdk_bdev_close(desc); 1186 free_bdev(bdev); 1187 spdk_bdev_finish(bdev_fini_cb, NULL); 1188 poll_threads(); 1189 } 1190 1191 static void 1192 bdev_io_spans_split_test(void) 1193 { 1194 struct spdk_bdev bdev; 1195 struct spdk_bdev_io bdev_io; 1196 struct iovec iov[BDEV_IO_NUM_CHILD_IOV]; 1197 1198 memset(&bdev, 0, sizeof(bdev)); 1199 bdev_io.u.bdev.iovs = iov; 1200 1201 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1202 bdev.optimal_io_boundary = 0; 1203 bdev.max_segment_size = 0; 1204 bdev.max_num_segments = 0; 1205 bdev_io.bdev = &bdev; 1206 1207 /* bdev has no optimal_io_boundary and max_size set - so this should return false. */ 1208 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1209 1210 bdev.split_on_optimal_io_boundary = true; 1211 bdev.optimal_io_boundary = 32; 1212 bdev_io.type = SPDK_BDEV_IO_TYPE_RESET; 1213 1214 /* RESETs are not based on LBAs - so this should return false. */ 1215 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1216 1217 bdev_io.type = SPDK_BDEV_IO_TYPE_READ; 1218 bdev_io.u.bdev.offset_blocks = 0; 1219 bdev_io.u.bdev.num_blocks = 32; 1220 1221 /* This I/O run right up to, but does not cross, the boundary - so this should return false. */ 1222 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1223 1224 bdev_io.u.bdev.num_blocks = 33; 1225 1226 /* This I/O spans a boundary. */ 1227 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1228 1229 bdev_io.u.bdev.num_blocks = 32; 1230 bdev.max_segment_size = 512 * 32; 1231 bdev.max_num_segments = 1; 1232 bdev_io.u.bdev.iovcnt = 1; 1233 iov[0].iov_len = 512; 1234 1235 /* Does not cross and exceed max_size or max_segs */ 1236 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1237 1238 bdev.split_on_optimal_io_boundary = false; 1239 bdev.max_segment_size = 512; 1240 bdev.max_num_segments = 1; 1241 bdev_io.u.bdev.iovcnt = 2; 1242 1243 /* Exceed max_segs */ 1244 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1245 1246 bdev.max_num_segments = 2; 1247 iov[0].iov_len = 513; 1248 iov[1].iov_len = 512; 1249 1250 /* Exceed max_sizes */ 1251 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1252 1253 bdev.max_segment_size = 0; 1254 bdev.write_unit_size = 32; 1255 bdev.split_on_write_unit = true; 1256 bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE; 1257 1258 /* This I/O is one write unit */ 1259 CU_ASSERT(bdev_io_should_split(&bdev_io) == false); 1260 1261 bdev_io.u.bdev.num_blocks = 32 * 2; 1262 1263 /* This I/O is more than one write unit */ 1264 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1265 1266 bdev_io.u.bdev.offset_blocks = 1; 1267 bdev_io.u.bdev.num_blocks = 32; 1268 1269 /* This I/O is not aligned to write unit size */ 1270 CU_ASSERT(bdev_io_should_split(&bdev_io) == true); 1271 } 1272 1273 static void 1274 bdev_io_boundary_split_test(void) 1275 { 1276 struct spdk_bdev *bdev; 1277 struct spdk_bdev_desc *desc = NULL; 1278 struct spdk_io_channel *io_ch; 1279 struct spdk_bdev_opts bdev_opts = {}; 1280 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1281 struct ut_expected_io *expected_io; 1282 void *md_buf = (void *)0xFF000000; 1283 uint64_t i; 1284 int rc; 1285 1286 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1287 bdev_opts.bdev_io_pool_size = 512; 1288 bdev_opts.bdev_io_cache_size = 64; 1289 1290 rc = spdk_bdev_set_opts(&bdev_opts); 1291 CU_ASSERT(rc == 0); 1292 spdk_bdev_initialize(bdev_init_cb, NULL); 1293 1294 bdev = allocate_bdev("bdev0"); 1295 1296 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 1297 CU_ASSERT(rc == 0); 1298 SPDK_CU_ASSERT_FATAL(desc != NULL); 1299 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 1300 io_ch = spdk_bdev_get_io_channel(desc); 1301 CU_ASSERT(io_ch != NULL); 1302 1303 bdev->optimal_io_boundary = 16; 1304 bdev->split_on_optimal_io_boundary = false; 1305 1306 g_io_done = false; 1307 1308 /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */ 1309 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1); 1310 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512); 1311 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1312 1313 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 1314 CU_ASSERT(rc == 0); 1315 CU_ASSERT(g_io_done == false); 1316 1317 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1318 stub_complete_io(1); 1319 CU_ASSERT(g_io_done == true); 1320 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1321 1322 bdev->split_on_optimal_io_boundary = true; 1323 bdev->md_interleave = false; 1324 bdev->md_len = 8; 1325 1326 /* Now test that a single-vector command is split correctly. 1327 * Offset 14, length 8, payload 0xF000 1328 * Child - Offset 14, length 2, payload 0xF000 1329 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 1330 * 1331 * Set up the expected values before calling spdk_bdev_read_blocks 1332 */ 1333 g_io_done = false; 1334 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 1335 expected_io->md_buf = md_buf; 1336 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 1337 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1338 1339 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 1340 expected_io->md_buf = md_buf + 2 * 8; 1341 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 1342 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1343 1344 /* spdk_bdev_read_blocks will submit the first child immediately. */ 1345 rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf, 1346 14, 8, io_done, NULL); 1347 CU_ASSERT(rc == 0); 1348 CU_ASSERT(g_io_done == false); 1349 1350 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1351 stub_complete_io(2); 1352 CU_ASSERT(g_io_done == true); 1353 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1354 1355 /* Now set up a more complex, multi-vector command that needs to be split, 1356 * including splitting iovecs. 1357 */ 1358 iov[0].iov_base = (void *)0x10000; 1359 iov[0].iov_len = 512; 1360 iov[1].iov_base = (void *)0x20000; 1361 iov[1].iov_len = 20 * 512; 1362 iov[2].iov_base = (void *)0x30000; 1363 iov[2].iov_len = 11 * 512; 1364 1365 g_io_done = false; 1366 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 1367 expected_io->md_buf = md_buf; 1368 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 1369 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 1370 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1371 1372 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 1373 expected_io->md_buf = md_buf + 2 * 8; 1374 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 1375 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1376 1377 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 1378 expected_io->md_buf = md_buf + 18 * 8; 1379 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 1380 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 1381 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1382 1383 rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf, 1384 14, 32, io_done, NULL); 1385 CU_ASSERT(rc == 0); 1386 CU_ASSERT(g_io_done == false); 1387 1388 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 1389 stub_complete_io(3); 1390 CU_ASSERT(g_io_done == true); 1391 1392 /* Test multi vector command that needs to be split by strip and then needs to be 1393 * split further due to the capacity of child iovs. 1394 */ 1395 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1396 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1397 iov[i].iov_len = 512; 1398 } 1399 1400 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1401 g_io_done = false; 1402 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1403 BDEV_IO_NUM_CHILD_IOV); 1404 expected_io->md_buf = md_buf; 1405 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1406 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1407 } 1408 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1409 1410 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1411 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 1412 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1413 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1414 ut_expected_io_set_iov(expected_io, i, 1415 (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1416 } 1417 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1418 1419 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1420 0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1421 CU_ASSERT(rc == 0); 1422 CU_ASSERT(g_io_done == false); 1423 1424 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1425 stub_complete_io(1); 1426 CU_ASSERT(g_io_done == false); 1427 1428 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1429 stub_complete_io(1); 1430 CU_ASSERT(g_io_done == true); 1431 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1432 1433 /* Test multi vector command that needs to be split by strip and then needs to be 1434 * split further due to the capacity of child iovs. In this case, the length of 1435 * the rest of iovec array with an I/O boundary is the multiple of block size. 1436 */ 1437 1438 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1439 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 1440 */ 1441 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1442 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1443 iov[i].iov_len = 512; 1444 } 1445 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1446 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1447 iov[i].iov_len = 256; 1448 } 1449 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1450 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1451 1452 /* Add an extra iovec to trigger split */ 1453 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1454 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1455 1456 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1457 g_io_done = false; 1458 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1459 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV); 1460 expected_io->md_buf = md_buf; 1461 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1462 ut_expected_io_set_iov(expected_io, i, 1463 (void *)((i + 1) * 0x10000), 512); 1464 } 1465 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 1466 ut_expected_io_set_iov(expected_io, i, 1467 (void *)((i + 1) * 0x10000), 256); 1468 } 1469 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1470 1471 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1472 1, 1); 1473 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1474 ut_expected_io_set_iov(expected_io, 0, 1475 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1476 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1477 1478 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1479 1, 1); 1480 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1481 ut_expected_io_set_iov(expected_io, 0, 1482 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1483 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1484 1485 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1486 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1487 CU_ASSERT(rc == 0); 1488 CU_ASSERT(g_io_done == false); 1489 1490 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1491 stub_complete_io(1); 1492 CU_ASSERT(g_io_done == false); 1493 1494 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1495 stub_complete_io(2); 1496 CU_ASSERT(g_io_done == true); 1497 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1498 1499 /* Test multi vector command that needs to be split by strip and then needs to be 1500 * split further due to the capacity of child iovs, the child request offset should 1501 * be rewind to last aligned offset and go success without error. 1502 */ 1503 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1504 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1505 iov[i].iov_len = 512; 1506 } 1507 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1508 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1509 1510 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1511 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1512 1513 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1514 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1515 1516 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1517 g_io_done = false; 1518 g_io_status = 0; 1519 /* The first expected io should be start from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */ 1520 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1521 BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1); 1522 expected_io->md_buf = md_buf; 1523 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1524 ut_expected_io_set_iov(expected_io, i, 1525 (void *)((i + 1) * 0x10000), 512); 1526 } 1527 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1528 /* The second expected io should be start from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */ 1529 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1, 1530 1, 2); 1531 expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1532 ut_expected_io_set_iov(expected_io, 0, 1533 (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1534 ut_expected_io_set_iov(expected_io, 1, 1535 (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1536 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1537 /* The third expected io should be start from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */ 1538 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1539 1, 1); 1540 expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8; 1541 ut_expected_io_set_iov(expected_io, 0, 1542 (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1543 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1544 1545 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1546 0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1547 CU_ASSERT(rc == 0); 1548 CU_ASSERT(g_io_done == false); 1549 1550 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1551 stub_complete_io(1); 1552 CU_ASSERT(g_io_done == false); 1553 1554 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1555 stub_complete_io(2); 1556 CU_ASSERT(g_io_done == true); 1557 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1558 1559 /* Test multi vector command that needs to be split due to the IO boundary and 1560 * the capacity of child iovs. Especially test the case when the command is 1561 * split due to the capacity of child iovs, the tail address is not aligned with 1562 * block size and is rewinded to the aligned address. 1563 * 1564 * The iovecs used in read request is complex but is based on the data 1565 * collected in the real issue. We change the base addresses but keep the lengths 1566 * not to loose the credibility of the test. 1567 */ 1568 bdev->optimal_io_boundary = 128; 1569 g_io_done = false; 1570 g_io_status = 0; 1571 1572 for (i = 0; i < 31; i++) { 1573 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1574 iov[i].iov_len = 1024; 1575 } 1576 iov[31].iov_base = (void *)0xFEED1F00000; 1577 iov[31].iov_len = 32768; 1578 iov[32].iov_base = (void *)0xFEED2000000; 1579 iov[32].iov_len = 160; 1580 iov[33].iov_base = (void *)0xFEED2100000; 1581 iov[33].iov_len = 4096; 1582 iov[34].iov_base = (void *)0xFEED2200000; 1583 iov[34].iov_len = 4096; 1584 iov[35].iov_base = (void *)0xFEED2300000; 1585 iov[35].iov_len = 4096; 1586 iov[36].iov_base = (void *)0xFEED2400000; 1587 iov[36].iov_len = 4096; 1588 iov[37].iov_base = (void *)0xFEED2500000; 1589 iov[37].iov_len = 4096; 1590 iov[38].iov_base = (void *)0xFEED2600000; 1591 iov[38].iov_len = 4096; 1592 iov[39].iov_base = (void *)0xFEED2700000; 1593 iov[39].iov_len = 4096; 1594 iov[40].iov_base = (void *)0xFEED2800000; 1595 iov[40].iov_len = 4096; 1596 iov[41].iov_base = (void *)0xFEED2900000; 1597 iov[41].iov_len = 4096; 1598 iov[42].iov_base = (void *)0xFEED2A00000; 1599 iov[42].iov_len = 4096; 1600 iov[43].iov_base = (void *)0xFEED2B00000; 1601 iov[43].iov_len = 12288; 1602 iov[44].iov_base = (void *)0xFEED2C00000; 1603 iov[44].iov_len = 8192; 1604 iov[45].iov_base = (void *)0xFEED2F00000; 1605 iov[45].iov_len = 4096; 1606 iov[46].iov_base = (void *)0xFEED3000000; 1607 iov[46].iov_len = 4096; 1608 iov[47].iov_base = (void *)0xFEED3100000; 1609 iov[47].iov_len = 4096; 1610 iov[48].iov_base = (void *)0xFEED3200000; 1611 iov[48].iov_len = 24576; 1612 iov[49].iov_base = (void *)0xFEED3300000; 1613 iov[49].iov_len = 16384; 1614 iov[50].iov_base = (void *)0xFEED3400000; 1615 iov[50].iov_len = 12288; 1616 iov[51].iov_base = (void *)0xFEED3500000; 1617 iov[51].iov_len = 4096; 1618 iov[52].iov_base = (void *)0xFEED3600000; 1619 iov[52].iov_len = 4096; 1620 iov[53].iov_base = (void *)0xFEED3700000; 1621 iov[53].iov_len = 4096; 1622 iov[54].iov_base = (void *)0xFEED3800000; 1623 iov[54].iov_len = 28672; 1624 iov[55].iov_base = (void *)0xFEED3900000; 1625 iov[55].iov_len = 20480; 1626 iov[56].iov_base = (void *)0xFEED3A00000; 1627 iov[56].iov_len = 4096; 1628 iov[57].iov_base = (void *)0xFEED3B00000; 1629 iov[57].iov_len = 12288; 1630 iov[58].iov_base = (void *)0xFEED3C00000; 1631 iov[58].iov_len = 4096; 1632 iov[59].iov_base = (void *)0xFEED3D00000; 1633 iov[59].iov_len = 4096; 1634 iov[60].iov_base = (void *)0xFEED3E00000; 1635 iov[60].iov_len = 352; 1636 1637 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1638 * of child iovs, 1639 */ 1640 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1641 expected_io->md_buf = md_buf; 1642 for (i = 0; i < 32; i++) { 1643 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1644 } 1645 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1646 1647 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1648 * split by the IO boundary requirement. 1649 */ 1650 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1651 expected_io->md_buf = md_buf + 126 * 8; 1652 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1653 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1654 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1655 1656 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1657 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1658 */ 1659 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1660 expected_io->md_buf = md_buf + 128 * 8; 1661 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1662 iov[33].iov_len - 864); 1663 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1664 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1665 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1666 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1667 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1668 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1669 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1670 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1671 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1672 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1673 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1674 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1675 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1676 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1677 1678 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1679 * first 864 bytes of iov[52] split by the IO boundary requirement. 1680 */ 1681 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1682 expected_io->md_buf = md_buf + 256 * 8; 1683 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1684 iov[46].iov_len - 864); 1685 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1686 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1687 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1688 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1689 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1690 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1691 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1692 1693 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1694 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 1695 */ 1696 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6); 1697 expected_io->md_buf = md_buf + 384 * 8; 1698 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864), 1699 iov[52].iov_len - 864); 1700 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len); 1701 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len); 1702 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len); 1703 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len); 1704 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960); 1705 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1706 1707 /* The 6th child IO must be from the remaining 7328 bytes of iov[57] 1708 * to the first 3936 bytes of iov[58] split by the capacity of child iovs. 1709 */ 1710 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3); 1711 expected_io->md_buf = md_buf + 512 * 8; 1712 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960), 1713 iov[57].iov_len - 4960); 1714 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len); 1715 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936); 1716 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1717 1718 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */ 1719 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2); 1720 expected_io->md_buf = md_buf + 542 * 8; 1721 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936), 1722 iov[59].iov_len - 3936); 1723 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len); 1724 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1725 1726 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf, 1727 0, 543, io_done, NULL); 1728 CU_ASSERT(rc == 0); 1729 CU_ASSERT(g_io_done == false); 1730 1731 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1732 stub_complete_io(1); 1733 CU_ASSERT(g_io_done == false); 1734 1735 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1736 stub_complete_io(5); 1737 CU_ASSERT(g_io_done == false); 1738 1739 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1740 stub_complete_io(1); 1741 CU_ASSERT(g_io_done == true); 1742 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1743 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1744 1745 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be 1746 * split, so test that. 1747 */ 1748 bdev->optimal_io_boundary = 15; 1749 g_io_done = false; 1750 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 1751 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1752 1753 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 1754 CU_ASSERT(rc == 0); 1755 CU_ASSERT(g_io_done == false); 1756 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1757 stub_complete_io(1); 1758 CU_ASSERT(g_io_done == true); 1759 1760 /* Test an UNMAP. This should also not be split. */ 1761 bdev->optimal_io_boundary = 16; 1762 g_io_done = false; 1763 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0); 1764 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1765 1766 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL); 1767 CU_ASSERT(rc == 0); 1768 CU_ASSERT(g_io_done == false); 1769 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1770 stub_complete_io(1); 1771 CU_ASSERT(g_io_done == true); 1772 1773 /* Test a FLUSH. This should also not be split. */ 1774 bdev->optimal_io_boundary = 16; 1775 g_io_done = false; 1776 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0); 1777 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1778 1779 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 1780 CU_ASSERT(rc == 0); 1781 CU_ASSERT(g_io_done == false); 1782 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1783 stub_complete_io(1); 1784 CU_ASSERT(g_io_done == true); 1785 1786 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 1787 1788 /* Children requests return an error status */ 1789 bdev->optimal_io_boundary = 16; 1790 iov[0].iov_base = (void *)0x10000; 1791 iov[0].iov_len = 512 * 64; 1792 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1793 g_io_done = false; 1794 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1795 1796 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL); 1797 CU_ASSERT(rc == 0); 1798 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5); 1799 stub_complete_io(4); 1800 CU_ASSERT(g_io_done == false); 1801 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 1802 stub_complete_io(1); 1803 CU_ASSERT(g_io_done == true); 1804 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1805 1806 /* Test if a multi vector command terminated with failure before continuing 1807 * splitting process when one of child I/O failed. 1808 * The multi vector command is as same as the above that needs to be split by strip 1809 * and then needs to be split further due to the capacity of child iovs. 1810 */ 1811 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1812 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1813 iov[i].iov_len = 512; 1814 } 1815 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000); 1816 iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1817 1818 iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1819 iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1820 1821 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1822 iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1823 1824 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 1825 1826 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 1827 g_io_done = false; 1828 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1829 1830 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 1831 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1832 CU_ASSERT(rc == 0); 1833 CU_ASSERT(g_io_done == false); 1834 1835 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1836 stub_complete_io(1); 1837 CU_ASSERT(g_io_done == true); 1838 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 1839 1840 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 1841 1842 /* for this test we will create the following conditions to hit the code path where 1843 * we are trying to send and IO following a split that has no iovs because we had to 1844 * trim them for alignment reasons. 1845 * 1846 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200 1847 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV 1848 * position 30 and overshoot by 0x2e. 1849 * - That means we'll send the IO and loop back to pick up the remaining bytes at 1850 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e 1851 * which eliniates that vector so we just send the first split IO with 30 vectors 1852 * and let the completion pick up the last 2 vectors. 1853 */ 1854 bdev->optimal_io_boundary = 32; 1855 bdev->split_on_optimal_io_boundary = true; 1856 g_io_done = false; 1857 1858 /* Init all parent IOVs to 0x212 */ 1859 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 1860 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1861 iov[i].iov_len = 0x212; 1862 } 1863 1864 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV, 1865 BDEV_IO_NUM_CHILD_IOV - 1); 1866 /* expect 0-29 to be 1:1 with the parent iov */ 1867 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1868 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1869 } 1870 1871 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x1e) because of the alignment 1872 * where 0x1e is the amount we overshot the 16K boundary 1873 */ 1874 ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2, 1875 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4); 1876 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1877 1878 /* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was 1879 * shortened that take it to the next boundary and then a final one to get us to 1880 * 0x4200 bytes for the IO. 1881 */ 1882 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 1883 BDEV_IO_NUM_CHILD_IOV, 2); 1884 /* position 30 picked up the remaining bytes to the next boundary */ 1885 ut_expected_io_set_iov(expected_io, 0, 1886 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e); 1887 1888 /* position 31 picked the the rest of the transfer to get us to 0x4200 */ 1889 ut_expected_io_set_iov(expected_io, 1, 1890 (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2); 1891 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1892 1893 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0, 1894 BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1895 CU_ASSERT(rc == 0); 1896 CU_ASSERT(g_io_done == false); 1897 1898 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1899 stub_complete_io(1); 1900 CU_ASSERT(g_io_done == false); 1901 1902 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1903 stub_complete_io(1); 1904 CU_ASSERT(g_io_done == true); 1905 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1906 1907 spdk_put_io_channel(io_ch); 1908 spdk_bdev_close(desc); 1909 free_bdev(bdev); 1910 spdk_bdev_finish(bdev_fini_cb, NULL); 1911 poll_threads(); 1912 } 1913 1914 static void 1915 bdev_io_max_size_and_segment_split_test(void) 1916 { 1917 struct spdk_bdev *bdev; 1918 struct spdk_bdev_desc *desc = NULL; 1919 struct spdk_io_channel *io_ch; 1920 struct spdk_bdev_opts bdev_opts = {}; 1921 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 1922 struct ut_expected_io *expected_io; 1923 uint64_t i; 1924 int rc; 1925 1926 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 1927 bdev_opts.bdev_io_pool_size = 512; 1928 bdev_opts.bdev_io_cache_size = 64; 1929 1930 bdev_opts.opts_size = sizeof(bdev_opts); 1931 rc = spdk_bdev_set_opts(&bdev_opts); 1932 CU_ASSERT(rc == 0); 1933 spdk_bdev_initialize(bdev_init_cb, NULL); 1934 1935 bdev = allocate_bdev("bdev0"); 1936 1937 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 1938 CU_ASSERT(rc == 0); 1939 SPDK_CU_ASSERT_FATAL(desc != NULL); 1940 io_ch = spdk_bdev_get_io_channel(desc); 1941 CU_ASSERT(io_ch != NULL); 1942 1943 bdev->split_on_optimal_io_boundary = false; 1944 bdev->optimal_io_boundary = 0; 1945 1946 /* Case 0 max_num_segments == 0. 1947 * but segment size 2 * 512 > 512 1948 */ 1949 bdev->max_segment_size = 512; 1950 bdev->max_num_segments = 0; 1951 g_io_done = false; 1952 1953 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 1954 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 1955 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 1956 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1957 1958 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 1959 CU_ASSERT(rc == 0); 1960 CU_ASSERT(g_io_done == false); 1961 1962 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1963 stub_complete_io(1); 1964 CU_ASSERT(g_io_done == true); 1965 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1966 1967 /* Case 1 max_segment_size == 0 1968 * but iov num 2 > 1. 1969 */ 1970 bdev->max_segment_size = 0; 1971 bdev->max_num_segments = 1; 1972 g_io_done = false; 1973 1974 iov[0].iov_base = (void *)0x10000; 1975 iov[0].iov_len = 512; 1976 iov[1].iov_base = (void *)0x20000; 1977 iov[1].iov_len = 8 * 512; 1978 1979 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 1980 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 1981 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1982 1983 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 1984 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 1985 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1986 1987 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 1988 CU_ASSERT(rc == 0); 1989 CU_ASSERT(g_io_done == false); 1990 1991 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1992 stub_complete_io(2); 1993 CU_ASSERT(g_io_done == true); 1994 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1995 1996 /* Test that a non-vector command is split correctly. 1997 * Set up the expected values before calling spdk_bdev_read_blocks 1998 */ 1999 bdev->max_segment_size = 512; 2000 bdev->max_num_segments = 1; 2001 g_io_done = false; 2002 2003 /* Child IO 0 */ 2004 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2005 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2006 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2007 2008 /* Child IO 1 */ 2009 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2010 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2011 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2012 2013 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2014 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2015 CU_ASSERT(rc == 0); 2016 CU_ASSERT(g_io_done == false); 2017 2018 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2019 stub_complete_io(2); 2020 CU_ASSERT(g_io_done == true); 2021 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2022 2023 /* Now set up a more complex, multi-vector command that needs to be split, 2024 * including splitting iovecs. 2025 */ 2026 bdev->max_segment_size = 2 * 512; 2027 bdev->max_num_segments = 1; 2028 g_io_done = false; 2029 2030 iov[0].iov_base = (void *)0x10000; 2031 iov[0].iov_len = 2 * 512; 2032 iov[1].iov_base = (void *)0x20000; 2033 iov[1].iov_len = 4 * 512; 2034 iov[2].iov_base = (void *)0x30000; 2035 iov[2].iov_len = 6 * 512; 2036 2037 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2038 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2); 2039 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2040 2041 /* Split iov[1].size to 2 iov entries then split the segments */ 2042 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2043 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2); 2044 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2045 2046 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1); 2047 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2); 2048 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2049 2050 /* Split iov[2].size to 3 iov entries then split the segments */ 2051 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1); 2052 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2); 2053 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2054 2055 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1); 2056 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2); 2057 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2058 2059 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1); 2060 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2); 2061 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2062 2063 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL); 2064 CU_ASSERT(rc == 0); 2065 CU_ASSERT(g_io_done == false); 2066 2067 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 2068 stub_complete_io(6); 2069 CU_ASSERT(g_io_done == true); 2070 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2071 2072 /* Test multi vector command that needs to be split by strip and then needs to be 2073 * split further due to the capacity of parent IO child iovs. 2074 */ 2075 bdev->max_segment_size = 512; 2076 bdev->max_num_segments = 1; 2077 g_io_done = false; 2078 2079 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2080 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2081 iov[i].iov_len = 512 * 2; 2082 } 2083 2084 /* Each input iov.size is split into 2 iovs, 2085 * half of the input iov can fill all child iov entries of a single IO. 2086 */ 2087 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) { 2088 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1); 2089 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2090 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2091 2092 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1); 2093 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2094 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2095 } 2096 2097 /* The remaining iov is split in the second round */ 2098 for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2099 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1); 2100 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512); 2101 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2102 2103 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1); 2104 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512); 2105 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2106 } 2107 2108 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2109 BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 2110 CU_ASSERT(rc == 0); 2111 CU_ASSERT(g_io_done == false); 2112 2113 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2114 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2115 CU_ASSERT(g_io_done == false); 2116 2117 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV); 2118 stub_complete_io(BDEV_IO_NUM_CHILD_IOV); 2119 CU_ASSERT(g_io_done == true); 2120 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2121 2122 /* A wrong case, a child IO that is divided does 2123 * not meet the principle of multiples of block size, 2124 * and exits with error 2125 */ 2126 bdev->max_segment_size = 512; 2127 bdev->max_num_segments = 1; 2128 g_io_done = false; 2129 2130 iov[0].iov_base = (void *)0x10000; 2131 iov[0].iov_len = 512 + 256; 2132 iov[1].iov_base = (void *)0x20000; 2133 iov[1].iov_len = 256; 2134 2135 /* iov[0] is split to 512 and 256. 2136 * 256 is less than a block size, and it is found 2137 * in the next round of split that it is the first child IO smaller than 2138 * the block size, so the error exit 2139 */ 2140 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1); 2141 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512); 2142 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2143 2144 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL); 2145 CU_ASSERT(rc == 0); 2146 CU_ASSERT(g_io_done == false); 2147 2148 /* First child IO is OK */ 2149 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2150 stub_complete_io(1); 2151 CU_ASSERT(g_io_done == true); 2152 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2153 2154 /* error exit */ 2155 stub_complete_io(1); 2156 CU_ASSERT(g_io_done == true); 2157 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2158 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2159 2160 /* Test multi vector command that needs to be split by strip and then needs to be 2161 * split further due to the capacity of child iovs. 2162 * 2163 * In this case, the last two iovs need to be split, but it will exceed the capacity 2164 * of child iovs, so it needs to wait until the first batch completed. 2165 */ 2166 bdev->max_segment_size = 512; 2167 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2168 g_io_done = false; 2169 2170 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2171 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2172 iov[i].iov_len = 512; 2173 } 2174 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) { 2175 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2176 iov[i].iov_len = 512 * 2; 2177 } 2178 2179 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2180 BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV); 2181 /* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) Will not be split */ 2182 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2183 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2184 } 2185 /* (BDEV_IO_NUM_CHILD_IOV - 2) is split */ 2186 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512); 2187 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512); 2188 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2189 2190 /* Child iov entries exceed the max num of parent IO so split it in next round */ 2191 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2); 2192 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512); 2193 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512); 2194 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2195 2196 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0, 2197 BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL); 2198 CU_ASSERT(rc == 0); 2199 CU_ASSERT(g_io_done == false); 2200 2201 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2202 stub_complete_io(1); 2203 CU_ASSERT(g_io_done == false); 2204 2205 /* Next round */ 2206 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2207 stub_complete_io(1); 2208 CU_ASSERT(g_io_done == true); 2209 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2210 2211 /* This case is similar to the previous one, but the io composed of 2212 * the last few entries of child iov is not enough for a blocklen, so they 2213 * cannot be put into this IO, but wait until the next time. 2214 */ 2215 bdev->max_segment_size = 512; 2216 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2217 g_io_done = false; 2218 2219 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2220 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2221 iov[i].iov_len = 512; 2222 } 2223 2224 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2225 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2226 iov[i].iov_len = 128; 2227 } 2228 2229 /* First child iovcnt is't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2. 2230 * Because the left 2 iov is not enough for a blocklen. 2231 */ 2232 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2233 BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2); 2234 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2235 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 2236 } 2237 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2238 2239 /* The second child io waits until the end of the first child io before executing. 2240 * Because the iovcnt of the two IOs exceeds the child iovcnt of the parent IO. 2241 * BDEV_IO_NUM_CHILD_IOV - 2 to BDEV_IO_NUM_CHILD_IOV + 2 2242 */ 2243 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2, 2244 1, 4); 2245 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len); 2246 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len); 2247 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len); 2248 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len); 2249 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2250 2251 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2252 BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL); 2253 CU_ASSERT(rc == 0); 2254 CU_ASSERT(g_io_done == false); 2255 2256 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2257 stub_complete_io(1); 2258 CU_ASSERT(g_io_done == false); 2259 2260 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2261 stub_complete_io(1); 2262 CU_ASSERT(g_io_done == true); 2263 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2264 2265 /* A very complicated case. Each sg entry exceeds max_segment_size and 2266 * needs to be split. At the same time, child io must be a multiple of blocklen. 2267 * At the same time, child iovcnt exceeds parent iovcnt. 2268 */ 2269 bdev->max_segment_size = 512 + 128; 2270 bdev->max_num_segments = 3; 2271 g_io_done = false; 2272 2273 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) { 2274 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2275 iov[i].iov_len = 512 + 256; 2276 } 2277 2278 for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) { 2279 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2280 iov[i].iov_len = 512 + 128; 2281 } 2282 2283 /* Child IOs use 9 entries per for() round and 3 * 9 = 27 child iov entries. 2284 * Consume 4 parent IO iov entries per for() round and 6 block size. 2285 * Generate 9 child IOs. 2286 */ 2287 for (i = 0; i < 3; i++) { 2288 uint32_t j = i * 4; 2289 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3); 2290 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2291 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2292 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2293 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2294 2295 /* Child io must be a multiple of blocklen 2296 * iov[j + 2] must be split. If the third entry is also added, 2297 * the multiple of blocklen cannot be guaranteed. But it still 2298 * occupies one iov entry of the parent child iov. 2299 */ 2300 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2); 2301 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2302 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2303 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2304 2305 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3); 2306 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2307 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2308 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2309 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2310 } 2311 2312 /* Child iov position at 27, the 10th child IO 2313 * iov entry index is 3 * 4 and offset is 3 * 6 2314 */ 2315 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3); 2316 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640); 2317 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128); 2318 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256); 2319 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2320 2321 /* Child iov position at 30, the 11th child IO */ 2322 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2); 2323 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512); 2324 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512); 2325 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2326 2327 /* The 2nd split round and iovpos is 0, the 12th child IO */ 2328 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3); 2329 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256); 2330 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640); 2331 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128); 2332 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2333 2334 /* Consume 9 child IOs and 27 child iov entries. 2335 * Consume 4 parent IO iov entries per for() round and 6 block size. 2336 * Parent IO iov index start from 16 and block offset start from 24 2337 */ 2338 for (i = 0; i < 3; i++) { 2339 uint32_t j = i * 4 + 16; 2340 uint32_t offset = i * 6 + 24; 2341 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3); 2342 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640); 2343 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128); 2344 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256); 2345 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2346 2347 /* Child io must be a multiple of blocklen 2348 * iov[j + 2] must be split. If the third entry is also added, 2349 * the multiple of blocklen cannot be guaranteed. But it still 2350 * occupies one iov entry of the parent child iov. 2351 */ 2352 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2); 2353 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512); 2354 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512); 2355 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2356 2357 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3); 2358 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256); 2359 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640); 2360 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128); 2361 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2362 } 2363 2364 /* The 22th child IO, child iov position at 30 */ 2365 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1); 2366 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512); 2367 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2368 2369 /* The third round */ 2370 /* Here is the 23nd child IO and child iovpos is 0 */ 2371 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3); 2372 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256); 2373 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640); 2374 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128); 2375 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2376 2377 /* The 24th child IO */ 2378 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3); 2379 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640); 2380 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640); 2381 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256); 2382 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2383 2384 /* The 25th child IO */ 2385 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2); 2386 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384); 2387 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640); 2388 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2389 2390 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0, 2391 50, io_done, NULL); 2392 CU_ASSERT(rc == 0); 2393 CU_ASSERT(g_io_done == false); 2394 2395 /* Parent IO supports up to 32 child iovs, so it is calculated that 2396 * a maximum of 11 IOs can be split at a time, and the 2397 * splitting will continue after the first batch is over. 2398 */ 2399 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2400 stub_complete_io(11); 2401 CU_ASSERT(g_io_done == false); 2402 2403 /* The 2nd round */ 2404 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11); 2405 stub_complete_io(11); 2406 CU_ASSERT(g_io_done == false); 2407 2408 /* The last round */ 2409 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2410 stub_complete_io(3); 2411 CU_ASSERT(g_io_done == true); 2412 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2413 2414 /* Test an WRITE_ZEROES. This should also not be split. */ 2415 bdev->max_segment_size = 512; 2416 bdev->max_num_segments = 1; 2417 g_io_done = false; 2418 2419 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0); 2420 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2421 2422 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL); 2423 CU_ASSERT(rc == 0); 2424 CU_ASSERT(g_io_done == false); 2425 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2426 stub_complete_io(1); 2427 CU_ASSERT(g_io_done == true); 2428 2429 /* Test an UNMAP. This should also not be split. */ 2430 g_io_done = false; 2431 2432 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0); 2433 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2434 2435 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL); 2436 CU_ASSERT(rc == 0); 2437 CU_ASSERT(g_io_done == false); 2438 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2439 stub_complete_io(1); 2440 CU_ASSERT(g_io_done == true); 2441 2442 /* Test a FLUSH. This should also not be split. */ 2443 g_io_done = false; 2444 2445 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0); 2446 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2447 2448 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL); 2449 CU_ASSERT(rc == 0); 2450 CU_ASSERT(g_io_done == false); 2451 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2452 stub_complete_io(1); 2453 CU_ASSERT(g_io_done == true); 2454 2455 spdk_put_io_channel(io_ch); 2456 spdk_bdev_close(desc); 2457 free_bdev(bdev); 2458 spdk_bdev_finish(bdev_fini_cb, NULL); 2459 poll_threads(); 2460 } 2461 2462 static void 2463 bdev_io_mix_split_test(void) 2464 { 2465 struct spdk_bdev *bdev; 2466 struct spdk_bdev_desc *desc = NULL; 2467 struct spdk_io_channel *io_ch; 2468 struct spdk_bdev_opts bdev_opts = {}; 2469 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 2470 struct ut_expected_io *expected_io; 2471 uint64_t i; 2472 int rc; 2473 2474 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2475 bdev_opts.bdev_io_pool_size = 512; 2476 bdev_opts.bdev_io_cache_size = 64; 2477 2478 rc = spdk_bdev_set_opts(&bdev_opts); 2479 CU_ASSERT(rc == 0); 2480 spdk_bdev_initialize(bdev_init_cb, NULL); 2481 2482 bdev = allocate_bdev("bdev0"); 2483 2484 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2485 CU_ASSERT(rc == 0); 2486 SPDK_CU_ASSERT_FATAL(desc != NULL); 2487 io_ch = spdk_bdev_get_io_channel(desc); 2488 CU_ASSERT(io_ch != NULL); 2489 2490 /* First case optimal_io_boundary == max_segment_size * max_num_segments */ 2491 bdev->split_on_optimal_io_boundary = true; 2492 bdev->optimal_io_boundary = 16; 2493 2494 bdev->max_segment_size = 512; 2495 bdev->max_num_segments = 16; 2496 g_io_done = false; 2497 2498 /* IO crossing the IO boundary requires split 2499 * Total 2 child IOs. 2500 */ 2501 2502 /* The 1st child IO split the segment_size to multiple segment entry */ 2503 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2); 2504 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2505 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512); 2506 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2507 2508 /* The 2nd child IO split the segment_size to multiple segment entry */ 2509 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2); 2510 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512); 2511 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512); 2512 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2513 2514 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL); 2515 CU_ASSERT(rc == 0); 2516 CU_ASSERT(g_io_done == false); 2517 2518 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2519 stub_complete_io(2); 2520 CU_ASSERT(g_io_done == true); 2521 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2522 2523 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */ 2524 bdev->max_segment_size = 15 * 512; 2525 bdev->max_num_segments = 1; 2526 g_io_done = false; 2527 2528 /* IO crossing the IO boundary requires split. 2529 * The 1st child IO segment size exceeds the max_segment_size, 2530 * So 1st child IO will be splitted to multiple segment entry. 2531 * Then it split to 2 child IOs because of the max_num_segments. 2532 * Total 3 child IOs. 2533 */ 2534 2535 /* The first 2 IOs are in an IO boundary. 2536 * Because the optimal_io_boundary > max_segment_size * max_num_segments 2537 * So it split to the first 2 IOs. 2538 */ 2539 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1); 2540 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15); 2541 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2542 2543 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2544 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512); 2545 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2546 2547 /* The 3rd Child IO is because of the io boundary */ 2548 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2549 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2550 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2551 2552 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2553 CU_ASSERT(rc == 0); 2554 CU_ASSERT(g_io_done == false); 2555 2556 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2557 stub_complete_io(3); 2558 CU_ASSERT(g_io_done == true); 2559 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2560 2561 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */ 2562 bdev->max_segment_size = 17 * 512; 2563 bdev->max_num_segments = 1; 2564 g_io_done = false; 2565 2566 /* IO crossing the IO boundary requires split. 2567 * Child IO does not split. 2568 * Total 2 child IOs. 2569 */ 2570 2571 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1); 2572 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16); 2573 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2574 2575 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1); 2576 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2); 2577 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2578 2579 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL); 2580 CU_ASSERT(rc == 0); 2581 CU_ASSERT(g_io_done == false); 2582 2583 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2584 stub_complete_io(2); 2585 CU_ASSERT(g_io_done == true); 2586 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2587 2588 /* Now set up a more complex, multi-vector command that needs to be split, 2589 * including splitting iovecs. 2590 * optimal_io_boundary < max_segment_size * max_num_segments 2591 */ 2592 bdev->max_segment_size = 3 * 512; 2593 bdev->max_num_segments = 6; 2594 g_io_done = false; 2595 2596 iov[0].iov_base = (void *)0x10000; 2597 iov[0].iov_len = 4 * 512; 2598 iov[1].iov_base = (void *)0x20000; 2599 iov[1].iov_len = 4 * 512; 2600 iov[2].iov_base = (void *)0x30000; 2601 iov[2].iov_len = 10 * 512; 2602 2603 /* IO crossing the IO boundary requires split. 2604 * The 1st child IO segment size exceeds the max_segment_size and after 2605 * splitting segment_size, the num_segments exceeds max_num_segments. 2606 * So 1st child IO will be splitted to 2 child IOs. 2607 * Total 3 child IOs. 2608 */ 2609 2610 /* The first 2 IOs are in an IO boundary. 2611 * After splitting segment size the segment num exceeds. 2612 * So it splits to 2 child IOs. 2613 */ 2614 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6); 2615 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3); 2616 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512); 2617 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3); 2618 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512); 2619 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3); 2620 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3); 2621 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2622 2623 /* The 2nd child IO has the left segment entry */ 2624 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 2625 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2); 2626 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2627 2628 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1); 2629 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2); 2630 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2631 2632 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL); 2633 CU_ASSERT(rc == 0); 2634 CU_ASSERT(g_io_done == false); 2635 2636 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 2637 stub_complete_io(3); 2638 CU_ASSERT(g_io_done == true); 2639 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2640 2641 /* A very complicated case. Each sg entry exceeds max_segment_size 2642 * and split on io boundary. 2643 * optimal_io_boundary < max_segment_size * max_num_segments 2644 */ 2645 bdev->max_segment_size = 3 * 512; 2646 bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV; 2647 g_io_done = false; 2648 2649 for (i = 0; i < 20; i++) { 2650 iov[i].iov_base = (void *)((i + 1) * 0x10000); 2651 iov[i].iov_len = 512 * 4; 2652 } 2653 2654 /* IO crossing the IO boundary requires split. 2655 * 80 block length can split 5 child IOs base on offset and IO boundary. 2656 * Each iov entry needs to be splitted to 2 entries because of max_segment_size 2657 * Total 5 child IOs. 2658 */ 2659 2660 /* 4 iov entries are in an IO boundary and each iov entry splits to 2. 2661 * So each child IO occupies 8 child iov entries. 2662 */ 2663 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8); 2664 for (i = 0; i < 4; i++) { 2665 int iovcnt = i * 2; 2666 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2667 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2668 } 2669 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2670 2671 /* 2nd child IO and total 16 child iov entries of parent IO */ 2672 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8); 2673 for (i = 4; i < 8; i++) { 2674 int iovcnt = (i - 4) * 2; 2675 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2676 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2677 } 2678 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2679 2680 /* 3rd child IO and total 24 child iov entries of parent IO */ 2681 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8); 2682 for (i = 8; i < 12; i++) { 2683 int iovcnt = (i - 8) * 2; 2684 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2685 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2686 } 2687 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2688 2689 /* 4th child IO and total 32 child iov entries of parent IO */ 2690 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8); 2691 for (i = 12; i < 16; i++) { 2692 int iovcnt = (i - 12) * 2; 2693 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2694 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2695 } 2696 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2697 2698 /* 5th child IO and because of the child iov entry it should be splitted 2699 * in next round. 2700 */ 2701 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2702 for (i = 16; i < 20; i++) { 2703 int iovcnt = (i - 16) * 2; 2704 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2705 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2706 } 2707 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2708 2709 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2710 CU_ASSERT(rc == 0); 2711 CU_ASSERT(g_io_done == false); 2712 2713 /* First split round */ 2714 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2715 stub_complete_io(4); 2716 CU_ASSERT(g_io_done == false); 2717 2718 /* Second split round */ 2719 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2720 stub_complete_io(1); 2721 CU_ASSERT(g_io_done == true); 2722 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2723 2724 spdk_put_io_channel(io_ch); 2725 spdk_bdev_close(desc); 2726 free_bdev(bdev); 2727 spdk_bdev_finish(bdev_fini_cb, NULL); 2728 poll_threads(); 2729 } 2730 2731 static void 2732 bdev_io_split_with_io_wait(void) 2733 { 2734 struct spdk_bdev *bdev; 2735 struct spdk_bdev_desc *desc = NULL; 2736 struct spdk_io_channel *io_ch; 2737 struct spdk_bdev_channel *channel; 2738 struct spdk_bdev_mgmt_channel *mgmt_ch; 2739 struct spdk_bdev_opts bdev_opts = {}; 2740 struct iovec iov[3]; 2741 struct ut_expected_io *expected_io; 2742 int rc; 2743 2744 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2745 bdev_opts.bdev_io_pool_size = 2; 2746 bdev_opts.bdev_io_cache_size = 1; 2747 2748 rc = spdk_bdev_set_opts(&bdev_opts); 2749 CU_ASSERT(rc == 0); 2750 spdk_bdev_initialize(bdev_init_cb, NULL); 2751 2752 bdev = allocate_bdev("bdev0"); 2753 2754 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2755 CU_ASSERT(rc == 0); 2756 CU_ASSERT(desc != NULL); 2757 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2758 io_ch = spdk_bdev_get_io_channel(desc); 2759 CU_ASSERT(io_ch != NULL); 2760 channel = spdk_io_channel_get_ctx(io_ch); 2761 mgmt_ch = channel->shared_resource->mgmt_ch; 2762 2763 bdev->optimal_io_boundary = 16; 2764 bdev->split_on_optimal_io_boundary = true; 2765 2766 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2767 CU_ASSERT(rc == 0); 2768 2769 /* Now test that a single-vector command is split correctly. 2770 * Offset 14, length 8, payload 0xF000 2771 * Child - Offset 14, length 2, payload 0xF000 2772 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2773 * 2774 * Set up the expected values before calling spdk_bdev_read_blocks 2775 */ 2776 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2777 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2778 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2779 2780 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2781 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2782 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2783 2784 /* The following children will be submitted sequentially due to the capacity of 2785 * spdk_bdev_io. 2786 */ 2787 2788 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2789 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2790 CU_ASSERT(rc == 0); 2791 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2792 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2793 2794 /* Completing the first read I/O will submit the first child */ 2795 stub_complete_io(1); 2796 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2797 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2798 2799 /* Completing the first child will submit the second child */ 2800 stub_complete_io(1); 2801 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2802 2803 /* Complete the second child I/O. This should result in our callback getting 2804 * invoked since the parent I/O is now complete. 2805 */ 2806 stub_complete_io(1); 2807 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2808 2809 /* Now set up a more complex, multi-vector command that needs to be split, 2810 * including splitting iovecs. 2811 */ 2812 iov[0].iov_base = (void *)0x10000; 2813 iov[0].iov_len = 512; 2814 iov[1].iov_base = (void *)0x20000; 2815 iov[1].iov_len = 20 * 512; 2816 iov[2].iov_base = (void *)0x30000; 2817 iov[2].iov_len = 11 * 512; 2818 2819 g_io_done = false; 2820 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2821 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2822 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2823 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2824 2825 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2826 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2827 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2828 2829 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2830 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2831 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2832 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2833 2834 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2835 CU_ASSERT(rc == 0); 2836 CU_ASSERT(g_io_done == false); 2837 2838 /* The following children will be submitted sequentially due to the capacity of 2839 * spdk_bdev_io. 2840 */ 2841 2842 /* Completing the first child will submit the second child */ 2843 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2844 stub_complete_io(1); 2845 CU_ASSERT(g_io_done == false); 2846 2847 /* Completing the second child will submit the third child */ 2848 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2849 stub_complete_io(1); 2850 CU_ASSERT(g_io_done == false); 2851 2852 /* Completing the third child will result in our callback getting invoked 2853 * since the parent I/O is now complete. 2854 */ 2855 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2856 stub_complete_io(1); 2857 CU_ASSERT(g_io_done == true); 2858 2859 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2860 2861 spdk_put_io_channel(io_ch); 2862 spdk_bdev_close(desc); 2863 free_bdev(bdev); 2864 spdk_bdev_finish(bdev_fini_cb, NULL); 2865 poll_threads(); 2866 } 2867 2868 static void 2869 bdev_io_write_unit_split_test(void) 2870 { 2871 struct spdk_bdev *bdev; 2872 struct spdk_bdev_desc *desc = NULL; 2873 struct spdk_io_channel *io_ch; 2874 struct spdk_bdev_opts bdev_opts = {}; 2875 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 4]; 2876 struct ut_expected_io *expected_io; 2877 uint64_t i; 2878 int rc; 2879 2880 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2881 bdev_opts.bdev_io_pool_size = 512; 2882 bdev_opts.bdev_io_cache_size = 64; 2883 2884 rc = spdk_bdev_set_opts(&bdev_opts); 2885 CU_ASSERT(rc == 0); 2886 spdk_bdev_initialize(bdev_init_cb, NULL); 2887 2888 bdev = allocate_bdev("bdev0"); 2889 2890 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2891 CU_ASSERT(rc == 0); 2892 SPDK_CU_ASSERT_FATAL(desc != NULL); 2893 io_ch = spdk_bdev_get_io_channel(desc); 2894 CU_ASSERT(io_ch != NULL); 2895 2896 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 2897 bdev->write_unit_size = 32; 2898 bdev->split_on_write_unit = true; 2899 g_io_done = false; 2900 2901 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 2902 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 2903 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2904 2905 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 2906 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 2907 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2908 2909 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 2910 CU_ASSERT(rc == 0); 2911 CU_ASSERT(g_io_done == false); 2912 2913 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2914 stub_complete_io(2); 2915 CU_ASSERT(g_io_done == true); 2916 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2917 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2918 2919 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 2920 * based on write_unit_size, not optimal_io_boundary */ 2921 bdev->split_on_optimal_io_boundary = true; 2922 bdev->optimal_io_boundary = 16; 2923 g_io_done = false; 2924 2925 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 2926 CU_ASSERT(rc == 0); 2927 CU_ASSERT(g_io_done == false); 2928 2929 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2930 stub_complete_io(2); 2931 CU_ASSERT(g_io_done == true); 2932 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2933 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2934 2935 /* Write I/O should fail if it is smaller than write_unit_size */ 2936 g_io_done = false; 2937 2938 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 2939 CU_ASSERT(rc == 0); 2940 CU_ASSERT(g_io_done == false); 2941 2942 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2943 poll_threads(); 2944 CU_ASSERT(g_io_done == true); 2945 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2946 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2947 2948 /* Same for I/O not aligned to write_unit_size */ 2949 g_io_done = false; 2950 2951 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 2952 CU_ASSERT(rc == 0); 2953 CU_ASSERT(g_io_done == false); 2954 2955 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2956 poll_threads(); 2957 CU_ASSERT(g_io_done == true); 2958 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2959 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2960 2961 /* Write should fail if it needs to be split but there are not enough iovs to submit 2962 * an entire write unit */ 2963 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 2964 g_io_done = false; 2965 2966 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 2967 iov[i].iov_base = (void *)(0x1000 + 512 * i); 2968 iov[i].iov_len = 512; 2969 } 2970 2971 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 2972 io_done, NULL); 2973 CU_ASSERT(rc == 0); 2974 CU_ASSERT(g_io_done == false); 2975 2976 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2977 poll_threads(); 2978 CU_ASSERT(g_io_done == true); 2979 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2980 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2981 2982 spdk_put_io_channel(io_ch); 2983 spdk_bdev_close(desc); 2984 free_bdev(bdev); 2985 spdk_bdev_finish(bdev_fini_cb, NULL); 2986 poll_threads(); 2987 } 2988 2989 static void 2990 bdev_io_alignment(void) 2991 { 2992 struct spdk_bdev *bdev; 2993 struct spdk_bdev_desc *desc = NULL; 2994 struct spdk_io_channel *io_ch; 2995 struct spdk_bdev_opts bdev_opts = {}; 2996 int rc; 2997 void *buf = NULL; 2998 struct iovec iovs[2]; 2999 int iovcnt; 3000 uint64_t alignment; 3001 3002 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3003 bdev_opts.bdev_io_pool_size = 20; 3004 bdev_opts.bdev_io_cache_size = 2; 3005 3006 rc = spdk_bdev_set_opts(&bdev_opts); 3007 CU_ASSERT(rc == 0); 3008 spdk_bdev_initialize(bdev_init_cb, NULL); 3009 3010 fn_table.submit_request = stub_submit_request_get_buf; 3011 bdev = allocate_bdev("bdev0"); 3012 3013 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3014 CU_ASSERT(rc == 0); 3015 CU_ASSERT(desc != NULL); 3016 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3017 io_ch = spdk_bdev_get_io_channel(desc); 3018 CU_ASSERT(io_ch != NULL); 3019 3020 /* Create aligned buffer */ 3021 rc = posix_memalign(&buf, 4096, 8192); 3022 SPDK_CU_ASSERT_FATAL(rc == 0); 3023 3024 /* Pass aligned single buffer with no alignment required */ 3025 alignment = 1; 3026 bdev->required_alignment = spdk_u32log2(alignment); 3027 3028 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3029 CU_ASSERT(rc == 0); 3030 stub_complete_io(1); 3031 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3032 alignment)); 3033 3034 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3035 CU_ASSERT(rc == 0); 3036 stub_complete_io(1); 3037 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3038 alignment)); 3039 3040 /* Pass unaligned single buffer with no alignment required */ 3041 alignment = 1; 3042 bdev->required_alignment = spdk_u32log2(alignment); 3043 3044 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3045 CU_ASSERT(rc == 0); 3046 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3047 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3048 stub_complete_io(1); 3049 3050 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3051 CU_ASSERT(rc == 0); 3052 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3053 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3054 stub_complete_io(1); 3055 3056 /* Pass unaligned single buffer with 512 alignment required */ 3057 alignment = 512; 3058 bdev->required_alignment = spdk_u32log2(alignment); 3059 3060 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3061 CU_ASSERT(rc == 0); 3062 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3063 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3064 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3065 alignment)); 3066 stub_complete_io(1); 3067 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3068 3069 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3070 CU_ASSERT(rc == 0); 3071 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3072 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3073 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3074 alignment)); 3075 stub_complete_io(1); 3076 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3077 3078 /* Pass unaligned single buffer with 4096 alignment required */ 3079 alignment = 4096; 3080 bdev->required_alignment = spdk_u32log2(alignment); 3081 3082 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3083 CU_ASSERT(rc == 0); 3084 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3085 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3086 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3087 alignment)); 3088 stub_complete_io(1); 3089 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3090 3091 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3092 CU_ASSERT(rc == 0); 3093 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1); 3094 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3095 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3096 alignment)); 3097 stub_complete_io(1); 3098 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3099 3100 /* Pass aligned iovs with no alignment required */ 3101 alignment = 1; 3102 bdev->required_alignment = spdk_u32log2(alignment); 3103 3104 iovcnt = 1; 3105 iovs[0].iov_base = buf; 3106 iovs[0].iov_len = 512; 3107 3108 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3109 CU_ASSERT(rc == 0); 3110 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3111 stub_complete_io(1); 3112 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3113 3114 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3115 CU_ASSERT(rc == 0); 3116 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3117 stub_complete_io(1); 3118 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3119 3120 /* Pass unaligned iovs with no alignment required */ 3121 alignment = 1; 3122 bdev->required_alignment = spdk_u32log2(alignment); 3123 3124 iovcnt = 2; 3125 iovs[0].iov_base = buf + 16; 3126 iovs[0].iov_len = 256; 3127 iovs[1].iov_base = buf + 16 + 256 + 32; 3128 iovs[1].iov_len = 256; 3129 3130 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3131 CU_ASSERT(rc == 0); 3132 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3133 stub_complete_io(1); 3134 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3135 3136 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3137 CU_ASSERT(rc == 0); 3138 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3139 stub_complete_io(1); 3140 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3141 3142 /* Pass unaligned iov with 2048 alignment required */ 3143 alignment = 2048; 3144 bdev->required_alignment = spdk_u32log2(alignment); 3145 3146 iovcnt = 2; 3147 iovs[0].iov_base = buf + 16; 3148 iovs[0].iov_len = 256; 3149 iovs[1].iov_base = buf + 16 + 256 + 32; 3150 iovs[1].iov_len = 256; 3151 3152 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3153 CU_ASSERT(rc == 0); 3154 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3155 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3156 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3157 alignment)); 3158 stub_complete_io(1); 3159 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3160 3161 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3162 CU_ASSERT(rc == 0); 3163 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt); 3164 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3165 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3166 alignment)); 3167 stub_complete_io(1); 3168 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3169 3170 /* Pass iov without allocated buffer without alignment required */ 3171 alignment = 1; 3172 bdev->required_alignment = spdk_u32log2(alignment); 3173 3174 iovcnt = 1; 3175 iovs[0].iov_base = NULL; 3176 iovs[0].iov_len = 0; 3177 3178 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3179 CU_ASSERT(rc == 0); 3180 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3181 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3182 alignment)); 3183 stub_complete_io(1); 3184 3185 /* Pass iov without allocated buffer with 1024 alignment required */ 3186 alignment = 1024; 3187 bdev->required_alignment = spdk_u32log2(alignment); 3188 3189 iovcnt = 1; 3190 iovs[0].iov_base = NULL; 3191 iovs[0].iov_len = 0; 3192 3193 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3194 CU_ASSERT(rc == 0); 3195 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3196 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3197 alignment)); 3198 stub_complete_io(1); 3199 3200 spdk_put_io_channel(io_ch); 3201 spdk_bdev_close(desc); 3202 free_bdev(bdev); 3203 fn_table.submit_request = stub_submit_request; 3204 spdk_bdev_finish(bdev_fini_cb, NULL); 3205 poll_threads(); 3206 3207 free(buf); 3208 } 3209 3210 static void 3211 bdev_io_alignment_with_boundary(void) 3212 { 3213 struct spdk_bdev *bdev; 3214 struct spdk_bdev_desc *desc = NULL; 3215 struct spdk_io_channel *io_ch; 3216 struct spdk_bdev_opts bdev_opts = {}; 3217 int rc; 3218 void *buf = NULL; 3219 struct iovec iovs[2]; 3220 int iovcnt; 3221 uint64_t alignment; 3222 3223 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3224 bdev_opts.bdev_io_pool_size = 20; 3225 bdev_opts.bdev_io_cache_size = 2; 3226 3227 bdev_opts.opts_size = sizeof(bdev_opts); 3228 rc = spdk_bdev_set_opts(&bdev_opts); 3229 CU_ASSERT(rc == 0); 3230 spdk_bdev_initialize(bdev_init_cb, NULL); 3231 3232 fn_table.submit_request = stub_submit_request_get_buf; 3233 bdev = allocate_bdev("bdev0"); 3234 3235 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3236 CU_ASSERT(rc == 0); 3237 CU_ASSERT(desc != NULL); 3238 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3239 io_ch = spdk_bdev_get_io_channel(desc); 3240 CU_ASSERT(io_ch != NULL); 3241 3242 /* Create aligned buffer */ 3243 rc = posix_memalign(&buf, 4096, 131072); 3244 SPDK_CU_ASSERT_FATAL(rc == 0); 3245 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3246 3247 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3248 alignment = 512; 3249 bdev->required_alignment = spdk_u32log2(alignment); 3250 bdev->optimal_io_boundary = 2; 3251 bdev->split_on_optimal_io_boundary = true; 3252 3253 iovcnt = 1; 3254 iovs[0].iov_base = NULL; 3255 iovs[0].iov_len = 512 * 3; 3256 3257 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3258 CU_ASSERT(rc == 0); 3259 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3260 stub_complete_io(2); 3261 3262 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3263 alignment = 512; 3264 bdev->required_alignment = spdk_u32log2(alignment); 3265 bdev->optimal_io_boundary = 16; 3266 bdev->split_on_optimal_io_boundary = true; 3267 3268 iovcnt = 1; 3269 iovs[0].iov_base = NULL; 3270 iovs[0].iov_len = 512 * 16; 3271 3272 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3273 CU_ASSERT(rc == 0); 3274 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3275 stub_complete_io(2); 3276 3277 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3278 alignment = 512; 3279 bdev->required_alignment = spdk_u32log2(alignment); 3280 bdev->optimal_io_boundary = 128; 3281 bdev->split_on_optimal_io_boundary = true; 3282 3283 iovcnt = 1; 3284 iovs[0].iov_base = buf + 16; 3285 iovs[0].iov_len = 512 * 160; 3286 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3287 CU_ASSERT(rc == 0); 3288 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3289 stub_complete_io(2); 3290 3291 /* 512 * 3 with 2 IO boundary */ 3292 alignment = 512; 3293 bdev->required_alignment = spdk_u32log2(alignment); 3294 bdev->optimal_io_boundary = 2; 3295 bdev->split_on_optimal_io_boundary = true; 3296 3297 iovcnt = 2; 3298 iovs[0].iov_base = buf + 16; 3299 iovs[0].iov_len = 512; 3300 iovs[1].iov_base = buf + 16 + 512 + 32; 3301 iovs[1].iov_len = 1024; 3302 3303 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3304 CU_ASSERT(rc == 0); 3305 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3306 stub_complete_io(2); 3307 3308 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3309 CU_ASSERT(rc == 0); 3310 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3311 stub_complete_io(2); 3312 3313 /* 512 * 64 with 32 IO boundary */ 3314 bdev->optimal_io_boundary = 32; 3315 iovcnt = 2; 3316 iovs[0].iov_base = buf + 16; 3317 iovs[0].iov_len = 16384; 3318 iovs[1].iov_base = buf + 16 + 16384 + 32; 3319 iovs[1].iov_len = 16384; 3320 3321 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3322 CU_ASSERT(rc == 0); 3323 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3324 stub_complete_io(3); 3325 3326 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3327 CU_ASSERT(rc == 0); 3328 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3329 stub_complete_io(3); 3330 3331 /* 512 * 160 with 32 IO boundary */ 3332 iovcnt = 1; 3333 iovs[0].iov_base = buf + 16; 3334 iovs[0].iov_len = 16384 + 65536; 3335 3336 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3337 CU_ASSERT(rc == 0); 3338 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3339 stub_complete_io(6); 3340 3341 spdk_put_io_channel(io_ch); 3342 spdk_bdev_close(desc); 3343 free_bdev(bdev); 3344 fn_table.submit_request = stub_submit_request; 3345 spdk_bdev_finish(bdev_fini_cb, NULL); 3346 poll_threads(); 3347 3348 free(buf); 3349 } 3350 3351 static void 3352 histogram_status_cb(void *cb_arg, int status) 3353 { 3354 g_status = status; 3355 } 3356 3357 static void 3358 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3359 { 3360 g_status = status; 3361 g_histogram = histogram; 3362 } 3363 3364 static void 3365 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3366 uint64_t total, uint64_t so_far) 3367 { 3368 g_count += count; 3369 } 3370 3371 static void 3372 bdev_histograms(void) 3373 { 3374 struct spdk_bdev *bdev; 3375 struct spdk_bdev_desc *desc = NULL; 3376 struct spdk_io_channel *ch; 3377 struct spdk_histogram_data *histogram; 3378 uint8_t buf[4096]; 3379 int rc; 3380 3381 spdk_bdev_initialize(bdev_init_cb, NULL); 3382 3383 bdev = allocate_bdev("bdev"); 3384 3385 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3386 CU_ASSERT(rc == 0); 3387 CU_ASSERT(desc != NULL); 3388 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3389 3390 ch = spdk_bdev_get_io_channel(desc); 3391 CU_ASSERT(ch != NULL); 3392 3393 /* Enable histogram */ 3394 g_status = -1; 3395 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3396 poll_threads(); 3397 CU_ASSERT(g_status == 0); 3398 CU_ASSERT(bdev->internal.histogram_enabled == true); 3399 3400 /* Allocate histogram */ 3401 histogram = spdk_histogram_data_alloc(); 3402 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3403 3404 /* Check if histogram is zeroed */ 3405 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3406 poll_threads(); 3407 CU_ASSERT(g_status == 0); 3408 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3409 3410 g_count = 0; 3411 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3412 3413 CU_ASSERT(g_count == 0); 3414 3415 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3416 CU_ASSERT(rc == 0); 3417 3418 spdk_delay_us(10); 3419 stub_complete_io(1); 3420 poll_threads(); 3421 3422 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3423 CU_ASSERT(rc == 0); 3424 3425 spdk_delay_us(10); 3426 stub_complete_io(1); 3427 poll_threads(); 3428 3429 /* Check if histogram gathered data from all I/O channels */ 3430 g_histogram = NULL; 3431 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3432 poll_threads(); 3433 CU_ASSERT(g_status == 0); 3434 CU_ASSERT(bdev->internal.histogram_enabled == true); 3435 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3436 3437 g_count = 0; 3438 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3439 CU_ASSERT(g_count == 2); 3440 3441 /* Disable histogram */ 3442 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3443 poll_threads(); 3444 CU_ASSERT(g_status == 0); 3445 CU_ASSERT(bdev->internal.histogram_enabled == false); 3446 3447 /* Try to run histogram commands on disabled bdev */ 3448 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3449 poll_threads(); 3450 CU_ASSERT(g_status == -EFAULT); 3451 3452 spdk_histogram_data_free(histogram); 3453 spdk_put_io_channel(ch); 3454 spdk_bdev_close(desc); 3455 free_bdev(bdev); 3456 spdk_bdev_finish(bdev_fini_cb, NULL); 3457 poll_threads(); 3458 } 3459 3460 static void 3461 _bdev_compare(bool emulated) 3462 { 3463 struct spdk_bdev *bdev; 3464 struct spdk_bdev_desc *desc = NULL; 3465 struct spdk_io_channel *ioch; 3466 struct ut_expected_io *expected_io; 3467 uint64_t offset, num_blocks; 3468 uint32_t num_completed; 3469 char aa_buf[512]; 3470 char bb_buf[512]; 3471 struct iovec compare_iov; 3472 uint8_t expected_io_type; 3473 int rc; 3474 3475 if (emulated) { 3476 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3477 } else { 3478 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3479 } 3480 3481 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3482 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3483 3484 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3485 3486 spdk_bdev_initialize(bdev_init_cb, NULL); 3487 fn_table.submit_request = stub_submit_request_get_buf; 3488 bdev = allocate_bdev("bdev"); 3489 3490 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3491 CU_ASSERT_EQUAL(rc, 0); 3492 SPDK_CU_ASSERT_FATAL(desc != NULL); 3493 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3494 ioch = spdk_bdev_get_io_channel(desc); 3495 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3496 3497 fn_table.submit_request = stub_submit_request_get_buf; 3498 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3499 3500 offset = 50; 3501 num_blocks = 1; 3502 compare_iov.iov_base = aa_buf; 3503 compare_iov.iov_len = sizeof(aa_buf); 3504 3505 /* 1. successful compare */ 3506 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3507 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3508 3509 g_io_done = false; 3510 g_compare_read_buf = aa_buf; 3511 g_compare_read_buf_len = sizeof(aa_buf); 3512 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3513 CU_ASSERT_EQUAL(rc, 0); 3514 num_completed = stub_complete_io(1); 3515 CU_ASSERT_EQUAL(num_completed, 1); 3516 CU_ASSERT(g_io_done == true); 3517 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3518 3519 /* 2. miscompare */ 3520 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3521 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3522 3523 g_io_done = false; 3524 g_compare_read_buf = bb_buf; 3525 g_compare_read_buf_len = sizeof(bb_buf); 3526 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3527 CU_ASSERT_EQUAL(rc, 0); 3528 num_completed = stub_complete_io(1); 3529 CU_ASSERT_EQUAL(num_completed, 1); 3530 CU_ASSERT(g_io_done == true); 3531 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3532 3533 spdk_put_io_channel(ioch); 3534 spdk_bdev_close(desc); 3535 free_bdev(bdev); 3536 fn_table.submit_request = stub_submit_request; 3537 spdk_bdev_finish(bdev_fini_cb, NULL); 3538 poll_threads(); 3539 3540 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3541 3542 g_compare_read_buf = NULL; 3543 } 3544 3545 static void 3546 _bdev_compare_with_md(bool emulated) 3547 { 3548 struct spdk_bdev *bdev; 3549 struct spdk_bdev_desc *desc = NULL; 3550 struct spdk_io_channel *ioch; 3551 struct ut_expected_io *expected_io; 3552 uint64_t offset, num_blocks; 3553 uint32_t num_completed; 3554 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3555 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3556 char buf_miscompare[1024 /* 2 * blocklen */]; 3557 char md_buf[16]; 3558 char md_buf_miscompare[16]; 3559 struct iovec compare_iov; 3560 uint8_t expected_io_type; 3561 int rc; 3562 3563 if (emulated) { 3564 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3565 } else { 3566 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3567 } 3568 3569 memset(buf, 0xaa, sizeof(buf)); 3570 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3571 /* make last md different */ 3572 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3573 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3574 memset(md_buf, 0xaa, 16); 3575 memset(md_buf_miscompare, 0xbb, 16); 3576 3577 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3578 3579 spdk_bdev_initialize(bdev_init_cb, NULL); 3580 fn_table.submit_request = stub_submit_request_get_buf; 3581 bdev = allocate_bdev("bdev"); 3582 3583 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3584 CU_ASSERT_EQUAL(rc, 0); 3585 SPDK_CU_ASSERT_FATAL(desc != NULL); 3586 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3587 ioch = spdk_bdev_get_io_channel(desc); 3588 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3589 3590 fn_table.submit_request = stub_submit_request_get_buf; 3591 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3592 3593 offset = 50; 3594 num_blocks = 2; 3595 3596 /* interleaved md & data */ 3597 bdev->md_interleave = true; 3598 bdev->md_len = 8; 3599 bdev->blocklen = 512 + 8; 3600 compare_iov.iov_base = buf; 3601 compare_iov.iov_len = sizeof(buf); 3602 3603 /* 1. successful compare with md interleaved */ 3604 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3605 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3606 3607 g_io_done = false; 3608 g_compare_read_buf = buf; 3609 g_compare_read_buf_len = sizeof(buf); 3610 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3611 CU_ASSERT_EQUAL(rc, 0); 3612 num_completed = stub_complete_io(1); 3613 CU_ASSERT_EQUAL(num_completed, 1); 3614 CU_ASSERT(g_io_done == true); 3615 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3616 3617 /* 2. miscompare with md interleaved */ 3618 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3619 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3620 3621 g_io_done = false; 3622 g_compare_read_buf = buf_interleaved_miscompare; 3623 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3624 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3625 CU_ASSERT_EQUAL(rc, 0); 3626 num_completed = stub_complete_io(1); 3627 CU_ASSERT_EQUAL(num_completed, 1); 3628 CU_ASSERT(g_io_done == true); 3629 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3630 3631 /* Separate data & md buffers */ 3632 bdev->md_interleave = false; 3633 bdev->blocklen = 512; 3634 compare_iov.iov_base = buf; 3635 compare_iov.iov_len = 1024; 3636 3637 /* 3. successful compare with md separated */ 3638 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3639 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3640 3641 g_io_done = false; 3642 g_compare_read_buf = buf; 3643 g_compare_read_buf_len = 1024; 3644 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3645 g_compare_md_buf = md_buf; 3646 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3647 offset, num_blocks, io_done, NULL); 3648 CU_ASSERT_EQUAL(rc, 0); 3649 num_completed = stub_complete_io(1); 3650 CU_ASSERT_EQUAL(num_completed, 1); 3651 CU_ASSERT(g_io_done == true); 3652 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3653 3654 /* 4. miscompare with md separated where md buf is different */ 3655 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3656 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3657 3658 g_io_done = false; 3659 g_compare_read_buf = buf; 3660 g_compare_read_buf_len = 1024; 3661 g_compare_md_buf = md_buf_miscompare; 3662 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3663 offset, num_blocks, io_done, NULL); 3664 CU_ASSERT_EQUAL(rc, 0); 3665 num_completed = stub_complete_io(1); 3666 CU_ASSERT_EQUAL(num_completed, 1); 3667 CU_ASSERT(g_io_done == true); 3668 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3669 3670 /* 5. miscompare with md separated where buf is different */ 3671 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3672 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3673 3674 g_io_done = false; 3675 g_compare_read_buf = buf_miscompare; 3676 g_compare_read_buf_len = sizeof(buf_miscompare); 3677 g_compare_md_buf = md_buf; 3678 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3679 offset, num_blocks, io_done, NULL); 3680 CU_ASSERT_EQUAL(rc, 0); 3681 num_completed = stub_complete_io(1); 3682 CU_ASSERT_EQUAL(num_completed, 1); 3683 CU_ASSERT(g_io_done == true); 3684 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3685 3686 bdev->md_len = 0; 3687 g_compare_md_buf = NULL; 3688 3689 spdk_put_io_channel(ioch); 3690 spdk_bdev_close(desc); 3691 free_bdev(bdev); 3692 fn_table.submit_request = stub_submit_request; 3693 spdk_bdev_finish(bdev_fini_cb, NULL); 3694 poll_threads(); 3695 3696 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3697 3698 g_compare_read_buf = NULL; 3699 } 3700 3701 static void 3702 bdev_compare(void) 3703 { 3704 _bdev_compare(false); 3705 _bdev_compare_with_md(false); 3706 } 3707 3708 static void 3709 bdev_compare_emulated(void) 3710 { 3711 _bdev_compare(true); 3712 _bdev_compare_with_md(true); 3713 } 3714 3715 static void 3716 bdev_compare_and_write(void) 3717 { 3718 struct spdk_bdev *bdev; 3719 struct spdk_bdev_desc *desc = NULL; 3720 struct spdk_io_channel *ioch; 3721 struct ut_expected_io *expected_io; 3722 uint64_t offset, num_blocks; 3723 uint32_t num_completed; 3724 char aa_buf[512]; 3725 char bb_buf[512]; 3726 char cc_buf[512]; 3727 char write_buf[512]; 3728 struct iovec compare_iov; 3729 struct iovec write_iov; 3730 int rc; 3731 3732 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3733 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3734 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3735 3736 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3737 3738 spdk_bdev_initialize(bdev_init_cb, NULL); 3739 fn_table.submit_request = stub_submit_request_get_buf; 3740 bdev = allocate_bdev("bdev"); 3741 3742 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3743 CU_ASSERT_EQUAL(rc, 0); 3744 SPDK_CU_ASSERT_FATAL(desc != NULL); 3745 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3746 ioch = spdk_bdev_get_io_channel(desc); 3747 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3748 3749 fn_table.submit_request = stub_submit_request_get_buf; 3750 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3751 3752 offset = 50; 3753 num_blocks = 1; 3754 compare_iov.iov_base = aa_buf; 3755 compare_iov.iov_len = sizeof(aa_buf); 3756 write_iov.iov_base = bb_buf; 3757 write_iov.iov_len = sizeof(bb_buf); 3758 3759 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3760 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3761 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3762 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3763 3764 g_io_done = false; 3765 g_compare_read_buf = aa_buf; 3766 g_compare_read_buf_len = sizeof(aa_buf); 3767 memset(write_buf, 0, sizeof(write_buf)); 3768 g_compare_write_buf = write_buf; 3769 g_compare_write_buf_len = sizeof(write_buf); 3770 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3771 offset, num_blocks, io_done, NULL); 3772 /* Trigger range locking */ 3773 poll_threads(); 3774 CU_ASSERT_EQUAL(rc, 0); 3775 num_completed = stub_complete_io(1); 3776 CU_ASSERT_EQUAL(num_completed, 1); 3777 CU_ASSERT(g_io_done == false); 3778 num_completed = stub_complete_io(1); 3779 /* Trigger range unlocking */ 3780 poll_threads(); 3781 CU_ASSERT_EQUAL(num_completed, 1); 3782 CU_ASSERT(g_io_done == true); 3783 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3784 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3785 3786 /* Test miscompare */ 3787 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3788 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3789 3790 g_io_done = false; 3791 g_compare_read_buf = cc_buf; 3792 g_compare_read_buf_len = sizeof(cc_buf); 3793 memset(write_buf, 0, sizeof(write_buf)); 3794 g_compare_write_buf = write_buf; 3795 g_compare_write_buf_len = sizeof(write_buf); 3796 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3797 offset, num_blocks, io_done, NULL); 3798 /* Trigger range locking */ 3799 poll_threads(); 3800 CU_ASSERT_EQUAL(rc, 0); 3801 num_completed = stub_complete_io(1); 3802 /* Trigger range unlocking earlier because we expect error here */ 3803 poll_threads(); 3804 CU_ASSERT_EQUAL(num_completed, 1); 3805 CU_ASSERT(g_io_done == true); 3806 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3807 num_completed = stub_complete_io(1); 3808 CU_ASSERT_EQUAL(num_completed, 0); 3809 3810 spdk_put_io_channel(ioch); 3811 spdk_bdev_close(desc); 3812 free_bdev(bdev); 3813 fn_table.submit_request = stub_submit_request; 3814 spdk_bdev_finish(bdev_fini_cb, NULL); 3815 poll_threads(); 3816 3817 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3818 3819 g_compare_read_buf = NULL; 3820 g_compare_write_buf = NULL; 3821 } 3822 3823 static void 3824 bdev_write_zeroes(void) 3825 { 3826 struct spdk_bdev *bdev; 3827 struct spdk_bdev_desc *desc = NULL; 3828 struct spdk_io_channel *ioch; 3829 struct ut_expected_io *expected_io; 3830 uint64_t offset, num_io_blocks, num_blocks; 3831 uint32_t num_completed, num_requests; 3832 int rc; 3833 3834 spdk_bdev_initialize(bdev_init_cb, NULL); 3835 bdev = allocate_bdev("bdev"); 3836 3837 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3838 CU_ASSERT_EQUAL(rc, 0); 3839 SPDK_CU_ASSERT_FATAL(desc != NULL); 3840 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3841 ioch = spdk_bdev_get_io_channel(desc); 3842 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3843 3844 fn_table.submit_request = stub_submit_request; 3845 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3846 3847 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3848 bdev->md_len = 0; 3849 bdev->blocklen = 4096; 3850 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3851 3852 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3853 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3854 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3855 CU_ASSERT_EQUAL(rc, 0); 3856 num_completed = stub_complete_io(1); 3857 CU_ASSERT_EQUAL(num_completed, 1); 3858 3859 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3860 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3861 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3862 num_requests = 2; 3863 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3864 3865 for (offset = 0; offset < num_requests; ++offset) { 3866 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3867 offset * num_io_blocks, num_io_blocks, 0); 3868 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3869 } 3870 3871 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3872 CU_ASSERT_EQUAL(rc, 0); 3873 num_completed = stub_complete_io(num_requests); 3874 CU_ASSERT_EQUAL(num_completed, num_requests); 3875 3876 /* Check that the splitting is correct if bdev has interleaved metadata */ 3877 bdev->md_interleave = true; 3878 bdev->md_len = 64; 3879 bdev->blocklen = 4096 + 64; 3880 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3881 3882 num_requests = offset = 0; 3883 while (offset < num_blocks) { 3884 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3885 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3886 offset, num_io_blocks, 0); 3887 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3888 offset += num_io_blocks; 3889 num_requests++; 3890 } 3891 3892 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3893 CU_ASSERT_EQUAL(rc, 0); 3894 num_completed = stub_complete_io(num_requests); 3895 CU_ASSERT_EQUAL(num_completed, num_requests); 3896 num_completed = stub_complete_io(num_requests); 3897 assert(num_completed == 0); 3898 3899 /* Check the the same for separate metadata buffer */ 3900 bdev->md_interleave = false; 3901 bdev->md_len = 64; 3902 bdev->blocklen = 4096; 3903 3904 num_requests = offset = 0; 3905 while (offset < num_blocks) { 3906 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 3907 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3908 offset, num_io_blocks, 0); 3909 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3910 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3911 offset += num_io_blocks; 3912 num_requests++; 3913 } 3914 3915 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3916 CU_ASSERT_EQUAL(rc, 0); 3917 num_completed = stub_complete_io(num_requests); 3918 CU_ASSERT_EQUAL(num_completed, num_requests); 3919 3920 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3921 spdk_put_io_channel(ioch); 3922 spdk_bdev_close(desc); 3923 free_bdev(bdev); 3924 spdk_bdev_finish(bdev_fini_cb, NULL); 3925 poll_threads(); 3926 } 3927 3928 static void 3929 bdev_zcopy_write(void) 3930 { 3931 struct spdk_bdev *bdev; 3932 struct spdk_bdev_desc *desc = NULL; 3933 struct spdk_io_channel *ioch; 3934 struct ut_expected_io *expected_io; 3935 uint64_t offset, num_blocks; 3936 uint32_t num_completed; 3937 char aa_buf[512]; 3938 struct iovec iov; 3939 int rc; 3940 const bool populate = false; 3941 const bool commit = true; 3942 3943 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3944 3945 spdk_bdev_initialize(bdev_init_cb, NULL); 3946 bdev = allocate_bdev("bdev"); 3947 3948 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3949 CU_ASSERT_EQUAL(rc, 0); 3950 SPDK_CU_ASSERT_FATAL(desc != NULL); 3951 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3952 ioch = spdk_bdev_get_io_channel(desc); 3953 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3954 3955 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3956 3957 offset = 50; 3958 num_blocks = 1; 3959 iov.iov_base = NULL; 3960 iov.iov_len = 0; 3961 3962 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 3963 g_zcopy_read_buf_len = (uint32_t) -1; 3964 /* Do a zcopy start for a write (populate=false) */ 3965 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3966 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3967 g_io_done = false; 3968 g_zcopy_write_buf = aa_buf; 3969 g_zcopy_write_buf_len = sizeof(aa_buf); 3970 g_zcopy_bdev_io = NULL; 3971 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3972 CU_ASSERT_EQUAL(rc, 0); 3973 num_completed = stub_complete_io(1); 3974 CU_ASSERT_EQUAL(num_completed, 1); 3975 CU_ASSERT(g_io_done == true); 3976 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3977 /* Check that the iov has been set up */ 3978 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 3979 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 3980 /* Check that the bdev_io has been saved */ 3981 CU_ASSERT(g_zcopy_bdev_io != NULL); 3982 /* Now do the zcopy end for a write (commit=true) */ 3983 g_io_done = false; 3984 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3985 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3986 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3987 CU_ASSERT_EQUAL(rc, 0); 3988 num_completed = stub_complete_io(1); 3989 CU_ASSERT_EQUAL(num_completed, 1); 3990 CU_ASSERT(g_io_done == true); 3991 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3992 /* Check the g_zcopy are reset by io_done */ 3993 CU_ASSERT(g_zcopy_write_buf == NULL); 3994 CU_ASSERT(g_zcopy_write_buf_len == 0); 3995 /* Check that io_done has freed the g_zcopy_bdev_io */ 3996 CU_ASSERT(g_zcopy_bdev_io == NULL); 3997 3998 /* Check the zcopy read buffer has not been touched which 3999 * ensures that the correct buffers were used. 4000 */ 4001 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4002 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4003 4004 spdk_put_io_channel(ioch); 4005 spdk_bdev_close(desc); 4006 free_bdev(bdev); 4007 spdk_bdev_finish(bdev_fini_cb, NULL); 4008 poll_threads(); 4009 } 4010 4011 static void 4012 bdev_zcopy_read(void) 4013 { 4014 struct spdk_bdev *bdev; 4015 struct spdk_bdev_desc *desc = NULL; 4016 struct spdk_io_channel *ioch; 4017 struct ut_expected_io *expected_io; 4018 uint64_t offset, num_blocks; 4019 uint32_t num_completed; 4020 char aa_buf[512]; 4021 struct iovec iov; 4022 int rc; 4023 const bool populate = true; 4024 const bool commit = false; 4025 4026 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4027 4028 spdk_bdev_initialize(bdev_init_cb, NULL); 4029 bdev = allocate_bdev("bdev"); 4030 4031 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4032 CU_ASSERT_EQUAL(rc, 0); 4033 SPDK_CU_ASSERT_FATAL(desc != NULL); 4034 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4035 ioch = spdk_bdev_get_io_channel(desc); 4036 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4037 4038 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4039 4040 offset = 50; 4041 num_blocks = 1; 4042 iov.iov_base = NULL; 4043 iov.iov_len = 0; 4044 4045 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4046 g_zcopy_write_buf_len = (uint32_t) -1; 4047 4048 /* Do a zcopy start for a read (populate=true) */ 4049 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4050 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4051 g_io_done = false; 4052 g_zcopy_read_buf = aa_buf; 4053 g_zcopy_read_buf_len = sizeof(aa_buf); 4054 g_zcopy_bdev_io = NULL; 4055 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4056 CU_ASSERT_EQUAL(rc, 0); 4057 num_completed = stub_complete_io(1); 4058 CU_ASSERT_EQUAL(num_completed, 1); 4059 CU_ASSERT(g_io_done == true); 4060 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4061 /* Check that the iov has been set up */ 4062 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4063 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4064 /* Check that the bdev_io has been saved */ 4065 CU_ASSERT(g_zcopy_bdev_io != NULL); 4066 4067 /* Now do the zcopy end for a read (commit=false) */ 4068 g_io_done = false; 4069 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4070 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4071 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4072 CU_ASSERT_EQUAL(rc, 0); 4073 num_completed = stub_complete_io(1); 4074 CU_ASSERT_EQUAL(num_completed, 1); 4075 CU_ASSERT(g_io_done == true); 4076 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4077 /* Check the g_zcopy are reset by io_done */ 4078 CU_ASSERT(g_zcopy_read_buf == NULL); 4079 CU_ASSERT(g_zcopy_read_buf_len == 0); 4080 /* Check that io_done has freed the g_zcopy_bdev_io */ 4081 CU_ASSERT(g_zcopy_bdev_io == NULL); 4082 4083 /* Check the zcopy write buffer has not been touched which 4084 * ensures that the correct buffers were used. 4085 */ 4086 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4087 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4088 4089 spdk_put_io_channel(ioch); 4090 spdk_bdev_close(desc); 4091 free_bdev(bdev); 4092 spdk_bdev_finish(bdev_fini_cb, NULL); 4093 poll_threads(); 4094 } 4095 4096 static void 4097 bdev_open_while_hotremove(void) 4098 { 4099 struct spdk_bdev *bdev; 4100 struct spdk_bdev_desc *desc[2] = {}; 4101 int rc; 4102 4103 bdev = allocate_bdev("bdev"); 4104 4105 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4106 CU_ASSERT(rc == 0); 4107 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4108 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4109 4110 spdk_bdev_unregister(bdev, NULL, NULL); 4111 /* Bdev unregister is handled asynchronously. Poll thread to complete. */ 4112 poll_threads(); 4113 4114 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4115 CU_ASSERT(rc == -ENODEV); 4116 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4117 4118 spdk_bdev_close(desc[0]); 4119 free_bdev(bdev); 4120 } 4121 4122 static void 4123 bdev_close_while_hotremove(void) 4124 { 4125 struct spdk_bdev *bdev; 4126 struct spdk_bdev_desc *desc = NULL; 4127 int rc = 0; 4128 4129 bdev = allocate_bdev("bdev"); 4130 4131 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4132 CU_ASSERT_EQUAL(rc, 0); 4133 SPDK_CU_ASSERT_FATAL(desc != NULL); 4134 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4135 4136 /* Simulate hot-unplug by unregistering bdev */ 4137 g_event_type1 = 0xFF; 4138 g_unregister_arg = NULL; 4139 g_unregister_rc = -1; 4140 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4141 /* Close device while remove event is in flight */ 4142 spdk_bdev_close(desc); 4143 4144 /* Ensure that unregister callback is delayed */ 4145 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4146 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4147 4148 poll_threads(); 4149 4150 /* Event callback shall not be issued because device was closed */ 4151 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4152 /* Unregister callback is issued */ 4153 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4154 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4155 4156 free_bdev(bdev); 4157 } 4158 4159 static void 4160 bdev_open_ext(void) 4161 { 4162 struct spdk_bdev *bdev; 4163 struct spdk_bdev_desc *desc1 = NULL; 4164 struct spdk_bdev_desc *desc2 = NULL; 4165 int rc = 0; 4166 4167 bdev = allocate_bdev("bdev"); 4168 4169 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4170 CU_ASSERT_EQUAL(rc, -EINVAL); 4171 4172 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4173 CU_ASSERT_EQUAL(rc, 0); 4174 4175 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4176 CU_ASSERT_EQUAL(rc, 0); 4177 4178 g_event_type1 = 0xFF; 4179 g_event_type2 = 0xFF; 4180 4181 /* Simulate hot-unplug by unregistering bdev */ 4182 spdk_bdev_unregister(bdev, NULL, NULL); 4183 poll_threads(); 4184 4185 /* Check if correct events have been triggered in event callback fn */ 4186 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4187 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4188 4189 free_bdev(bdev); 4190 poll_threads(); 4191 } 4192 4193 static void 4194 bdev_open_ext_unregister(void) 4195 { 4196 struct spdk_bdev *bdev; 4197 struct spdk_bdev_desc *desc1 = NULL; 4198 struct spdk_bdev_desc *desc2 = NULL; 4199 struct spdk_bdev_desc *desc3 = NULL; 4200 struct spdk_bdev_desc *desc4 = NULL; 4201 int rc = 0; 4202 4203 bdev = allocate_bdev("bdev"); 4204 4205 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4206 CU_ASSERT_EQUAL(rc, -EINVAL); 4207 4208 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4209 CU_ASSERT_EQUAL(rc, 0); 4210 4211 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4212 CU_ASSERT_EQUAL(rc, 0); 4213 4214 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4215 CU_ASSERT_EQUAL(rc, 0); 4216 4217 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4218 CU_ASSERT_EQUAL(rc, 0); 4219 4220 g_event_type1 = 0xFF; 4221 g_event_type2 = 0xFF; 4222 g_event_type3 = 0xFF; 4223 g_event_type4 = 0xFF; 4224 4225 g_unregister_arg = NULL; 4226 g_unregister_rc = -1; 4227 4228 /* Simulate hot-unplug by unregistering bdev */ 4229 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4230 4231 /* 4232 * Unregister is handled asynchronously and event callback 4233 * (i.e., above bdev_open_cbN) will be called. 4234 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 4235 * close the desc3 and desc4 so that the bdev is not closed. 4236 */ 4237 poll_threads(); 4238 4239 /* Check if correct events have been triggered in event callback fn */ 4240 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4241 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4242 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4243 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4244 4245 /* Check that unregister callback is delayed */ 4246 CU_ASSERT(g_unregister_arg == NULL); 4247 CU_ASSERT(g_unregister_rc == -1); 4248 4249 /* 4250 * Explicitly close desc3. As desc4 is still opened there, the 4251 * unergister callback is still delayed to execute. 4252 */ 4253 spdk_bdev_close(desc3); 4254 CU_ASSERT(g_unregister_arg == NULL); 4255 CU_ASSERT(g_unregister_rc == -1); 4256 4257 /* 4258 * Explicitly close desc4 to trigger the ongoing bdev unregister 4259 * operation after last desc is closed. 4260 */ 4261 spdk_bdev_close(desc4); 4262 4263 /* Poll the thread for the async unregister operation */ 4264 poll_threads(); 4265 4266 /* Check that unregister callback is executed */ 4267 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4268 CU_ASSERT(g_unregister_rc == 0); 4269 4270 free_bdev(bdev); 4271 poll_threads(); 4272 } 4273 4274 struct timeout_io_cb_arg { 4275 struct iovec iov; 4276 uint8_t type; 4277 }; 4278 4279 static int 4280 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4281 { 4282 struct spdk_bdev_io *bdev_io; 4283 int n = 0; 4284 4285 if (!ch) { 4286 return -1; 4287 } 4288 4289 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4290 n++; 4291 } 4292 4293 return n; 4294 } 4295 4296 static void 4297 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4298 { 4299 struct timeout_io_cb_arg *ctx = cb_arg; 4300 4301 ctx->type = bdev_io->type; 4302 ctx->iov.iov_base = bdev_io->iov.iov_base; 4303 ctx->iov.iov_len = bdev_io->iov.iov_len; 4304 } 4305 4306 static void 4307 bdev_set_io_timeout(void) 4308 { 4309 struct spdk_bdev *bdev; 4310 struct spdk_bdev_desc *desc = NULL; 4311 struct spdk_io_channel *io_ch = NULL; 4312 struct spdk_bdev_channel *bdev_ch = NULL; 4313 struct timeout_io_cb_arg cb_arg; 4314 4315 spdk_bdev_initialize(bdev_init_cb, NULL); 4316 4317 bdev = allocate_bdev("bdev"); 4318 4319 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4320 SPDK_CU_ASSERT_FATAL(desc != NULL); 4321 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4322 4323 io_ch = spdk_bdev_get_io_channel(desc); 4324 CU_ASSERT(io_ch != NULL); 4325 4326 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4327 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4328 4329 /* This is the part1. 4330 * We will check the bdev_ch->io_submitted list 4331 * TO make sure that it can link IOs and only the user submitted IOs 4332 */ 4333 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4334 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4335 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4336 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4337 stub_complete_io(1); 4338 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4339 stub_complete_io(1); 4340 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4341 4342 /* Split IO */ 4343 bdev->optimal_io_boundary = 16; 4344 bdev->split_on_optimal_io_boundary = true; 4345 4346 /* Now test that a single-vector command is split correctly. 4347 * Offset 14, length 8, payload 0xF000 4348 * Child - Offset 14, length 2, payload 0xF000 4349 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4350 * 4351 * Set up the expected values before calling spdk_bdev_read_blocks 4352 */ 4353 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4354 /* We count all submitted IOs including IO that are generated by splitting. */ 4355 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4356 stub_complete_io(1); 4357 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4358 stub_complete_io(1); 4359 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4360 4361 /* Also include the reset IO */ 4362 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4363 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4364 poll_threads(); 4365 stub_complete_io(1); 4366 poll_threads(); 4367 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4368 4369 /* This is part2 4370 * Test the desc timeout poller register 4371 */ 4372 4373 /* Successfully set the timeout */ 4374 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4375 CU_ASSERT(desc->io_timeout_poller != NULL); 4376 CU_ASSERT(desc->timeout_in_sec == 30); 4377 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4378 CU_ASSERT(desc->cb_arg == &cb_arg); 4379 4380 /* Change the timeout limit */ 4381 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4382 CU_ASSERT(desc->io_timeout_poller != NULL); 4383 CU_ASSERT(desc->timeout_in_sec == 20); 4384 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4385 CU_ASSERT(desc->cb_arg == &cb_arg); 4386 4387 /* Disable the timeout */ 4388 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4389 CU_ASSERT(desc->io_timeout_poller == NULL); 4390 4391 /* This the part3 4392 * We will test to catch timeout IO and check whether the IO is 4393 * the submitted one. 4394 */ 4395 memset(&cb_arg, 0, sizeof(cb_arg)); 4396 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4397 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4398 4399 /* Don't reach the limit */ 4400 spdk_delay_us(15 * spdk_get_ticks_hz()); 4401 poll_threads(); 4402 CU_ASSERT(cb_arg.type == 0); 4403 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4404 CU_ASSERT(cb_arg.iov.iov_len == 0); 4405 4406 /* 15 + 15 = 30 reach the limit */ 4407 spdk_delay_us(15 * spdk_get_ticks_hz()); 4408 poll_threads(); 4409 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4410 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4411 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4412 stub_complete_io(1); 4413 4414 /* Use the same split IO above and check the IO */ 4415 memset(&cb_arg, 0, sizeof(cb_arg)); 4416 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4417 4418 /* The first child complete in time */ 4419 spdk_delay_us(15 * spdk_get_ticks_hz()); 4420 poll_threads(); 4421 stub_complete_io(1); 4422 CU_ASSERT(cb_arg.type == 0); 4423 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4424 CU_ASSERT(cb_arg.iov.iov_len == 0); 4425 4426 /* The second child reach the limit */ 4427 spdk_delay_us(15 * spdk_get_ticks_hz()); 4428 poll_threads(); 4429 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4430 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4431 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4432 stub_complete_io(1); 4433 4434 /* Also include the reset IO */ 4435 memset(&cb_arg, 0, sizeof(cb_arg)); 4436 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4437 spdk_delay_us(30 * spdk_get_ticks_hz()); 4438 poll_threads(); 4439 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4440 stub_complete_io(1); 4441 poll_threads(); 4442 4443 spdk_put_io_channel(io_ch); 4444 spdk_bdev_close(desc); 4445 free_bdev(bdev); 4446 spdk_bdev_finish(bdev_fini_cb, NULL); 4447 poll_threads(); 4448 } 4449 4450 static void 4451 bdev_set_qd_sampling(void) 4452 { 4453 struct spdk_bdev *bdev; 4454 struct spdk_bdev_desc *desc = NULL; 4455 struct spdk_io_channel *io_ch = NULL; 4456 struct spdk_bdev_channel *bdev_ch = NULL; 4457 struct timeout_io_cb_arg cb_arg; 4458 4459 spdk_bdev_initialize(bdev_init_cb, NULL); 4460 4461 bdev = allocate_bdev("bdev"); 4462 4463 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4464 SPDK_CU_ASSERT_FATAL(desc != NULL); 4465 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4466 4467 io_ch = spdk_bdev_get_io_channel(desc); 4468 CU_ASSERT(io_ch != NULL); 4469 4470 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4471 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4472 4473 /* This is the part1. 4474 * We will check the bdev_ch->io_submitted list 4475 * TO make sure that it can link IOs and only the user submitted IOs 4476 */ 4477 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4478 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4479 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4480 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4481 stub_complete_io(1); 4482 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4483 stub_complete_io(1); 4484 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4485 4486 /* This is the part2. 4487 * Test the bdev's qd poller register 4488 */ 4489 /* 1st Successfully set the qd sampling period */ 4490 spdk_bdev_set_qd_sampling_period(bdev, 10); 4491 CU_ASSERT(bdev->internal.new_period == 10); 4492 CU_ASSERT(bdev->internal.period == 10); 4493 CU_ASSERT(bdev->internal.qd_desc != NULL); 4494 poll_threads(); 4495 CU_ASSERT(bdev->internal.qd_poller != NULL); 4496 4497 /* 2nd Change the qd sampling period */ 4498 spdk_bdev_set_qd_sampling_period(bdev, 20); 4499 CU_ASSERT(bdev->internal.new_period == 20); 4500 CU_ASSERT(bdev->internal.period == 10); 4501 CU_ASSERT(bdev->internal.qd_desc != NULL); 4502 poll_threads(); 4503 CU_ASSERT(bdev->internal.qd_poller != NULL); 4504 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4505 4506 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4507 spdk_delay_us(20); 4508 poll_thread_times(0, 1); 4509 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4510 spdk_bdev_set_qd_sampling_period(bdev, 30); 4511 CU_ASSERT(bdev->internal.new_period == 30); 4512 CU_ASSERT(bdev->internal.period == 20); 4513 poll_threads(); 4514 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4515 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4516 4517 /* 4th Disable the qd sampling period */ 4518 spdk_bdev_set_qd_sampling_period(bdev, 0); 4519 CU_ASSERT(bdev->internal.new_period == 0); 4520 CU_ASSERT(bdev->internal.period == 30); 4521 poll_threads(); 4522 CU_ASSERT(bdev->internal.qd_poller == NULL); 4523 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4524 CU_ASSERT(bdev->internal.qd_desc == NULL); 4525 4526 /* This is the part3. 4527 * We will test the submitted IO and reset works 4528 * properly with the qd sampling. 4529 */ 4530 memset(&cb_arg, 0, sizeof(cb_arg)); 4531 spdk_bdev_set_qd_sampling_period(bdev, 1); 4532 poll_threads(); 4533 4534 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4535 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4536 4537 /* Also include the reset IO */ 4538 memset(&cb_arg, 0, sizeof(cb_arg)); 4539 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4540 poll_threads(); 4541 4542 /* Close the desc */ 4543 spdk_put_io_channel(io_ch); 4544 spdk_bdev_close(desc); 4545 4546 /* Complete the submitted IO and reset */ 4547 stub_complete_io(2); 4548 poll_threads(); 4549 4550 free_bdev(bdev); 4551 spdk_bdev_finish(bdev_fini_cb, NULL); 4552 poll_threads(); 4553 } 4554 4555 static void 4556 lba_range_overlap(void) 4557 { 4558 struct lba_range r1, r2; 4559 4560 r1.offset = 100; 4561 r1.length = 50; 4562 4563 r2.offset = 0; 4564 r2.length = 1; 4565 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4566 4567 r2.offset = 0; 4568 r2.length = 100; 4569 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4570 4571 r2.offset = 0; 4572 r2.length = 110; 4573 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4574 4575 r2.offset = 100; 4576 r2.length = 10; 4577 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4578 4579 r2.offset = 110; 4580 r2.length = 20; 4581 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4582 4583 r2.offset = 140; 4584 r2.length = 150; 4585 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4586 4587 r2.offset = 130; 4588 r2.length = 200; 4589 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4590 4591 r2.offset = 150; 4592 r2.length = 100; 4593 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4594 4595 r2.offset = 110; 4596 r2.length = 0; 4597 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4598 } 4599 4600 static bool g_lock_lba_range_done; 4601 static bool g_unlock_lba_range_done; 4602 4603 static void 4604 lock_lba_range_done(void *ctx, int status) 4605 { 4606 g_lock_lba_range_done = true; 4607 } 4608 4609 static void 4610 unlock_lba_range_done(void *ctx, int status) 4611 { 4612 g_unlock_lba_range_done = true; 4613 } 4614 4615 static void 4616 lock_lba_range_check_ranges(void) 4617 { 4618 struct spdk_bdev *bdev; 4619 struct spdk_bdev_desc *desc = NULL; 4620 struct spdk_io_channel *io_ch; 4621 struct spdk_bdev_channel *channel; 4622 struct lba_range *range; 4623 int ctx1; 4624 int rc; 4625 4626 spdk_bdev_initialize(bdev_init_cb, NULL); 4627 4628 bdev = allocate_bdev("bdev0"); 4629 4630 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4631 CU_ASSERT(rc == 0); 4632 CU_ASSERT(desc != NULL); 4633 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4634 io_ch = spdk_bdev_get_io_channel(desc); 4635 CU_ASSERT(io_ch != NULL); 4636 channel = spdk_io_channel_get_ctx(io_ch); 4637 4638 g_lock_lba_range_done = false; 4639 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4640 CU_ASSERT(rc == 0); 4641 poll_threads(); 4642 4643 CU_ASSERT(g_lock_lba_range_done == true); 4644 range = TAILQ_FIRST(&channel->locked_ranges); 4645 SPDK_CU_ASSERT_FATAL(range != NULL); 4646 CU_ASSERT(range->offset == 20); 4647 CU_ASSERT(range->length == 10); 4648 CU_ASSERT(range->owner_ch == channel); 4649 4650 /* Unlocks must exactly match a lock. */ 4651 g_unlock_lba_range_done = false; 4652 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4653 CU_ASSERT(rc == -EINVAL); 4654 CU_ASSERT(g_unlock_lba_range_done == false); 4655 4656 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4657 CU_ASSERT(rc == 0); 4658 spdk_delay_us(100); 4659 poll_threads(); 4660 4661 CU_ASSERT(g_unlock_lba_range_done == true); 4662 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4663 4664 spdk_put_io_channel(io_ch); 4665 spdk_bdev_close(desc); 4666 free_bdev(bdev); 4667 spdk_bdev_finish(bdev_fini_cb, NULL); 4668 poll_threads(); 4669 } 4670 4671 static void 4672 lock_lba_range_with_io_outstanding(void) 4673 { 4674 struct spdk_bdev *bdev; 4675 struct spdk_bdev_desc *desc = NULL; 4676 struct spdk_io_channel *io_ch; 4677 struct spdk_bdev_channel *channel; 4678 struct lba_range *range; 4679 char buf[4096]; 4680 int ctx1; 4681 int rc; 4682 4683 spdk_bdev_initialize(bdev_init_cb, NULL); 4684 4685 bdev = allocate_bdev("bdev0"); 4686 4687 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4688 CU_ASSERT(rc == 0); 4689 CU_ASSERT(desc != NULL); 4690 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4691 io_ch = spdk_bdev_get_io_channel(desc); 4692 CU_ASSERT(io_ch != NULL); 4693 channel = spdk_io_channel_get_ctx(io_ch); 4694 4695 g_io_done = false; 4696 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4697 CU_ASSERT(rc == 0); 4698 4699 g_lock_lba_range_done = false; 4700 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4701 CU_ASSERT(rc == 0); 4702 poll_threads(); 4703 4704 /* The lock should immediately become valid, since there are no outstanding 4705 * write I/O. 4706 */ 4707 CU_ASSERT(g_io_done == false); 4708 CU_ASSERT(g_lock_lba_range_done == true); 4709 range = TAILQ_FIRST(&channel->locked_ranges); 4710 SPDK_CU_ASSERT_FATAL(range != NULL); 4711 CU_ASSERT(range->offset == 20); 4712 CU_ASSERT(range->length == 10); 4713 CU_ASSERT(range->owner_ch == channel); 4714 CU_ASSERT(range->locked_ctx == &ctx1); 4715 4716 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4717 CU_ASSERT(rc == 0); 4718 stub_complete_io(1); 4719 spdk_delay_us(100); 4720 poll_threads(); 4721 4722 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4723 4724 /* Now try again, but with a write I/O. */ 4725 g_io_done = false; 4726 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4727 CU_ASSERT(rc == 0); 4728 4729 g_lock_lba_range_done = false; 4730 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4731 CU_ASSERT(rc == 0); 4732 poll_threads(); 4733 4734 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4735 * But note that the range should be on the channel's locked_list, to make sure no 4736 * new write I/O are started. 4737 */ 4738 CU_ASSERT(g_io_done == false); 4739 CU_ASSERT(g_lock_lba_range_done == false); 4740 range = TAILQ_FIRST(&channel->locked_ranges); 4741 SPDK_CU_ASSERT_FATAL(range != NULL); 4742 CU_ASSERT(range->offset == 20); 4743 CU_ASSERT(range->length == 10); 4744 4745 /* Complete the write I/O. This should make the lock valid (checked by confirming 4746 * our callback was invoked). 4747 */ 4748 stub_complete_io(1); 4749 spdk_delay_us(100); 4750 poll_threads(); 4751 CU_ASSERT(g_io_done == true); 4752 CU_ASSERT(g_lock_lba_range_done == true); 4753 4754 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4755 CU_ASSERT(rc == 0); 4756 poll_threads(); 4757 4758 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4759 4760 spdk_put_io_channel(io_ch); 4761 spdk_bdev_close(desc); 4762 free_bdev(bdev); 4763 spdk_bdev_finish(bdev_fini_cb, NULL); 4764 poll_threads(); 4765 } 4766 4767 static void 4768 lock_lba_range_overlapped(void) 4769 { 4770 struct spdk_bdev *bdev; 4771 struct spdk_bdev_desc *desc = NULL; 4772 struct spdk_io_channel *io_ch; 4773 struct spdk_bdev_channel *channel; 4774 struct lba_range *range; 4775 int ctx1; 4776 int rc; 4777 4778 spdk_bdev_initialize(bdev_init_cb, NULL); 4779 4780 bdev = allocate_bdev("bdev0"); 4781 4782 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4783 CU_ASSERT(rc == 0); 4784 CU_ASSERT(desc != NULL); 4785 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4786 io_ch = spdk_bdev_get_io_channel(desc); 4787 CU_ASSERT(io_ch != NULL); 4788 channel = spdk_io_channel_get_ctx(io_ch); 4789 4790 /* Lock range 20-29. */ 4791 g_lock_lba_range_done = false; 4792 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4793 CU_ASSERT(rc == 0); 4794 poll_threads(); 4795 4796 CU_ASSERT(g_lock_lba_range_done == true); 4797 range = TAILQ_FIRST(&channel->locked_ranges); 4798 SPDK_CU_ASSERT_FATAL(range != NULL); 4799 CU_ASSERT(range->offset == 20); 4800 CU_ASSERT(range->length == 10); 4801 4802 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4803 * 20-29. 4804 */ 4805 g_lock_lba_range_done = false; 4806 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4807 CU_ASSERT(rc == 0); 4808 poll_threads(); 4809 4810 CU_ASSERT(g_lock_lba_range_done == false); 4811 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4812 SPDK_CU_ASSERT_FATAL(range != NULL); 4813 CU_ASSERT(range->offset == 25); 4814 CU_ASSERT(range->length == 15); 4815 4816 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4817 * no longer overlaps with an active lock. 4818 */ 4819 g_unlock_lba_range_done = false; 4820 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4821 CU_ASSERT(rc == 0); 4822 poll_threads(); 4823 4824 CU_ASSERT(g_unlock_lba_range_done == true); 4825 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4826 range = TAILQ_FIRST(&channel->locked_ranges); 4827 SPDK_CU_ASSERT_FATAL(range != NULL); 4828 CU_ASSERT(range->offset == 25); 4829 CU_ASSERT(range->length == 15); 4830 4831 /* Lock 40-59. This should immediately lock since it does not overlap with the 4832 * currently active 25-39 lock. 4833 */ 4834 g_lock_lba_range_done = false; 4835 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4836 CU_ASSERT(rc == 0); 4837 poll_threads(); 4838 4839 CU_ASSERT(g_lock_lba_range_done == true); 4840 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4841 SPDK_CU_ASSERT_FATAL(range != NULL); 4842 range = TAILQ_NEXT(range, tailq); 4843 SPDK_CU_ASSERT_FATAL(range != NULL); 4844 CU_ASSERT(range->offset == 40); 4845 CU_ASSERT(range->length == 20); 4846 4847 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4848 g_lock_lba_range_done = false; 4849 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4850 CU_ASSERT(rc == 0); 4851 poll_threads(); 4852 4853 CU_ASSERT(g_lock_lba_range_done == false); 4854 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4855 SPDK_CU_ASSERT_FATAL(range != NULL); 4856 CU_ASSERT(range->offset == 35); 4857 CU_ASSERT(range->length == 10); 4858 4859 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4860 * the 40-59 lock is still active. 4861 */ 4862 g_unlock_lba_range_done = false; 4863 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4864 CU_ASSERT(rc == 0); 4865 poll_threads(); 4866 4867 CU_ASSERT(g_unlock_lba_range_done == true); 4868 CU_ASSERT(g_lock_lba_range_done == false); 4869 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4870 SPDK_CU_ASSERT_FATAL(range != NULL); 4871 CU_ASSERT(range->offset == 35); 4872 CU_ASSERT(range->length == 10); 4873 4874 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4875 * no longer any active overlapping locks. 4876 */ 4877 g_unlock_lba_range_done = false; 4878 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4879 CU_ASSERT(rc == 0); 4880 poll_threads(); 4881 4882 CU_ASSERT(g_unlock_lba_range_done == true); 4883 CU_ASSERT(g_lock_lba_range_done == true); 4884 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4885 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4886 SPDK_CU_ASSERT_FATAL(range != NULL); 4887 CU_ASSERT(range->offset == 35); 4888 CU_ASSERT(range->length == 10); 4889 4890 /* Finally, unlock 35-44. */ 4891 g_unlock_lba_range_done = false; 4892 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4893 CU_ASSERT(rc == 0); 4894 poll_threads(); 4895 4896 CU_ASSERT(g_unlock_lba_range_done == true); 4897 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4898 4899 spdk_put_io_channel(io_ch); 4900 spdk_bdev_close(desc); 4901 free_bdev(bdev); 4902 spdk_bdev_finish(bdev_fini_cb, NULL); 4903 poll_threads(); 4904 } 4905 4906 static void 4907 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4908 { 4909 g_abort_done = true; 4910 g_abort_status = bdev_io->internal.status; 4911 spdk_bdev_free_io(bdev_io); 4912 } 4913 4914 static void 4915 bdev_io_abort(void) 4916 { 4917 struct spdk_bdev *bdev; 4918 struct spdk_bdev_desc *desc = NULL; 4919 struct spdk_io_channel *io_ch; 4920 struct spdk_bdev_channel *channel; 4921 struct spdk_bdev_mgmt_channel *mgmt_ch; 4922 struct spdk_bdev_opts bdev_opts = {}; 4923 struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2]; 4924 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4925 int rc; 4926 4927 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4928 bdev_opts.bdev_io_pool_size = 7; 4929 bdev_opts.bdev_io_cache_size = 2; 4930 4931 rc = spdk_bdev_set_opts(&bdev_opts); 4932 CU_ASSERT(rc == 0); 4933 spdk_bdev_initialize(bdev_init_cb, NULL); 4934 4935 bdev = allocate_bdev("bdev0"); 4936 4937 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4938 CU_ASSERT(rc == 0); 4939 CU_ASSERT(desc != NULL); 4940 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4941 io_ch = spdk_bdev_get_io_channel(desc); 4942 CU_ASSERT(io_ch != NULL); 4943 channel = spdk_io_channel_get_ctx(io_ch); 4944 mgmt_ch = channel->shared_resource->mgmt_ch; 4945 4946 g_abort_done = false; 4947 4948 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4949 4950 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4951 CU_ASSERT(rc == -ENOTSUP); 4952 4953 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4954 4955 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4956 CU_ASSERT(rc == 0); 4957 CU_ASSERT(g_abort_done == true); 4958 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4959 4960 /* Test the case that the target I/O was successfully aborted. */ 4961 g_io_done = false; 4962 4963 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4964 CU_ASSERT(rc == 0); 4965 CU_ASSERT(g_io_done == false); 4966 4967 g_abort_done = false; 4968 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4969 4970 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4971 CU_ASSERT(rc == 0); 4972 CU_ASSERT(g_io_done == true); 4973 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4974 stub_complete_io(1); 4975 CU_ASSERT(g_abort_done == true); 4976 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4977 4978 /* Test the case that the target I/O was not aborted because it completed 4979 * in the middle of execution of the abort. 4980 */ 4981 g_io_done = false; 4982 4983 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 4984 CU_ASSERT(rc == 0); 4985 CU_ASSERT(g_io_done == false); 4986 4987 g_abort_done = false; 4988 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 4989 4990 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4991 CU_ASSERT(rc == 0); 4992 CU_ASSERT(g_io_done == false); 4993 4994 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4995 stub_complete_io(1); 4996 CU_ASSERT(g_io_done == true); 4997 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4998 4999 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5000 stub_complete_io(1); 5001 CU_ASSERT(g_abort_done == true); 5002 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5003 5004 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5005 5006 bdev->optimal_io_boundary = 16; 5007 bdev->split_on_optimal_io_boundary = true; 5008 5009 /* Test that a single-vector command which is split is aborted correctly. 5010 * Offset 14, length 8, payload 0xF000 5011 * Child - Offset 14, length 2, payload 0xF000 5012 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5013 */ 5014 g_io_done = false; 5015 5016 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5017 CU_ASSERT(rc == 0); 5018 CU_ASSERT(g_io_done == false); 5019 5020 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5021 5022 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5023 5024 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5025 CU_ASSERT(rc == 0); 5026 CU_ASSERT(g_io_done == true); 5027 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5028 stub_complete_io(2); 5029 CU_ASSERT(g_abort_done == true); 5030 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5031 5032 /* Test that a multi-vector command that needs to be split by strip and then 5033 * needs to be split is aborted correctly. Abort is requested before the second 5034 * child I/O was submitted. The parent I/O should complete with failure without 5035 * submitting the second child I/O. 5036 */ 5037 for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5038 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5039 iov[i].iov_len = 512; 5040 } 5041 5042 bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV; 5043 g_io_done = false; 5044 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0, 5045 BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5046 CU_ASSERT(rc == 0); 5047 CU_ASSERT(g_io_done == false); 5048 5049 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5050 5051 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5052 5053 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5054 CU_ASSERT(rc == 0); 5055 CU_ASSERT(g_io_done == true); 5056 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5057 stub_complete_io(1); 5058 CU_ASSERT(g_abort_done == true); 5059 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5060 5061 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5062 5063 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5064 5065 bdev->optimal_io_boundary = 16; 5066 g_io_done = false; 5067 5068 /* Test that a ingle-vector command which is split is aborted correctly. 5069 * Differently from the above, the child abort request will be submitted 5070 * sequentially due to the capacity of spdk_bdev_io. 5071 */ 5072 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5073 CU_ASSERT(rc == 0); 5074 CU_ASSERT(g_io_done == false); 5075 5076 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5077 5078 g_abort_done = false; 5079 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5080 5081 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5082 CU_ASSERT(rc == 0); 5083 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5084 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5085 5086 stub_complete_io(1); 5087 CU_ASSERT(g_io_done == true); 5088 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5089 stub_complete_io(3); 5090 CU_ASSERT(g_abort_done == true); 5091 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5092 5093 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5094 5095 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5096 5097 spdk_put_io_channel(io_ch); 5098 spdk_bdev_close(desc); 5099 free_bdev(bdev); 5100 spdk_bdev_finish(bdev_fini_cb, NULL); 5101 poll_threads(); 5102 } 5103 5104 static void 5105 bdev_unmap(void) 5106 { 5107 struct spdk_bdev *bdev; 5108 struct spdk_bdev_desc *desc = NULL; 5109 struct spdk_io_channel *ioch; 5110 struct spdk_bdev_channel *bdev_ch; 5111 struct ut_expected_io *expected_io; 5112 struct spdk_bdev_opts bdev_opts = {}; 5113 uint32_t i, num_outstanding; 5114 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5115 int rc; 5116 5117 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5118 bdev_opts.bdev_io_pool_size = 512; 5119 bdev_opts.bdev_io_cache_size = 64; 5120 rc = spdk_bdev_set_opts(&bdev_opts); 5121 CU_ASSERT(rc == 0); 5122 5123 spdk_bdev_initialize(bdev_init_cb, NULL); 5124 bdev = allocate_bdev("bdev"); 5125 5126 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5127 CU_ASSERT_EQUAL(rc, 0); 5128 SPDK_CU_ASSERT_FATAL(desc != NULL); 5129 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5130 ioch = spdk_bdev_get_io_channel(desc); 5131 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5132 bdev_ch = spdk_io_channel_get_ctx(ioch); 5133 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5134 5135 fn_table.submit_request = stub_submit_request; 5136 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5137 5138 /* Case 1: First test the request won't be split */ 5139 num_blocks = 32; 5140 5141 g_io_done = false; 5142 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5143 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5144 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5145 CU_ASSERT_EQUAL(rc, 0); 5146 CU_ASSERT(g_io_done == false); 5147 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5148 stub_complete_io(1); 5149 CU_ASSERT(g_io_done == true); 5150 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5151 5152 /* Case 2: Test the split with 2 children requests */ 5153 bdev->max_unmap = 8; 5154 bdev->max_unmap_segments = 2; 5155 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5156 num_blocks = max_unmap_blocks * 2; 5157 offset = 0; 5158 5159 g_io_done = false; 5160 for (i = 0; i < 2; i++) { 5161 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5162 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5163 offset += max_unmap_blocks; 5164 } 5165 5166 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5167 CU_ASSERT_EQUAL(rc, 0); 5168 CU_ASSERT(g_io_done == false); 5169 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5170 stub_complete_io(2); 5171 CU_ASSERT(g_io_done == true); 5172 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5173 5174 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5175 num_children = 15; 5176 num_blocks = max_unmap_blocks * num_children; 5177 g_io_done = false; 5178 offset = 0; 5179 for (i = 0; i < num_children; i++) { 5180 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5181 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5182 offset += max_unmap_blocks; 5183 } 5184 5185 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5186 CU_ASSERT_EQUAL(rc, 0); 5187 CU_ASSERT(g_io_done == false); 5188 5189 while (num_children > 0) { 5190 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5191 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5192 stub_complete_io(num_outstanding); 5193 num_children -= num_outstanding; 5194 } 5195 CU_ASSERT(g_io_done == true); 5196 5197 spdk_put_io_channel(ioch); 5198 spdk_bdev_close(desc); 5199 free_bdev(bdev); 5200 spdk_bdev_finish(bdev_fini_cb, NULL); 5201 poll_threads(); 5202 } 5203 5204 static void 5205 bdev_write_zeroes_split_test(void) 5206 { 5207 struct spdk_bdev *bdev; 5208 struct spdk_bdev_desc *desc = NULL; 5209 struct spdk_io_channel *ioch; 5210 struct spdk_bdev_channel *bdev_ch; 5211 struct ut_expected_io *expected_io; 5212 struct spdk_bdev_opts bdev_opts = {}; 5213 uint32_t i, num_outstanding; 5214 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5215 int rc; 5216 5217 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5218 bdev_opts.bdev_io_pool_size = 512; 5219 bdev_opts.bdev_io_cache_size = 64; 5220 rc = spdk_bdev_set_opts(&bdev_opts); 5221 CU_ASSERT(rc == 0); 5222 5223 spdk_bdev_initialize(bdev_init_cb, NULL); 5224 bdev = allocate_bdev("bdev"); 5225 5226 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5227 CU_ASSERT_EQUAL(rc, 0); 5228 SPDK_CU_ASSERT_FATAL(desc != NULL); 5229 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5230 ioch = spdk_bdev_get_io_channel(desc); 5231 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5232 bdev_ch = spdk_io_channel_get_ctx(ioch); 5233 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5234 5235 fn_table.submit_request = stub_submit_request; 5236 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5237 5238 /* Case 1: First test the request won't be split */ 5239 num_blocks = 32; 5240 5241 g_io_done = false; 5242 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5243 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5244 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5245 CU_ASSERT_EQUAL(rc, 0); 5246 CU_ASSERT(g_io_done == false); 5247 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5248 stub_complete_io(1); 5249 CU_ASSERT(g_io_done == true); 5250 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5251 5252 /* Case 2: Test the split with 2 children requests */ 5253 max_write_zeroes_blocks = 8; 5254 bdev->max_write_zeroes = max_write_zeroes_blocks; 5255 num_blocks = max_write_zeroes_blocks * 2; 5256 offset = 0; 5257 5258 g_io_done = false; 5259 for (i = 0; i < 2; i++) { 5260 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5261 0); 5262 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5263 offset += max_write_zeroes_blocks; 5264 } 5265 5266 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5267 CU_ASSERT_EQUAL(rc, 0); 5268 CU_ASSERT(g_io_done == false); 5269 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5270 stub_complete_io(2); 5271 CU_ASSERT(g_io_done == true); 5272 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5273 5274 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5275 num_children = 15; 5276 num_blocks = max_write_zeroes_blocks * num_children; 5277 g_io_done = false; 5278 offset = 0; 5279 for (i = 0; i < num_children; i++) { 5280 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5281 0); 5282 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5283 offset += max_write_zeroes_blocks; 5284 } 5285 5286 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5287 CU_ASSERT_EQUAL(rc, 0); 5288 CU_ASSERT(g_io_done == false); 5289 5290 while (num_children > 0) { 5291 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5292 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5293 stub_complete_io(num_outstanding); 5294 num_children -= num_outstanding; 5295 } 5296 CU_ASSERT(g_io_done == true); 5297 5298 spdk_put_io_channel(ioch); 5299 spdk_bdev_close(desc); 5300 free_bdev(bdev); 5301 spdk_bdev_finish(bdev_fini_cb, NULL); 5302 poll_threads(); 5303 } 5304 5305 static void 5306 bdev_set_options_test(void) 5307 { 5308 struct spdk_bdev_opts bdev_opts = {}; 5309 int rc; 5310 5311 /* Case1: Do not set opts_size */ 5312 rc = spdk_bdev_set_opts(&bdev_opts); 5313 CU_ASSERT(rc == -1); 5314 5315 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5316 bdev_opts.bdev_io_pool_size = 4; 5317 bdev_opts.bdev_io_cache_size = 2; 5318 bdev_opts.small_buf_pool_size = 4; 5319 5320 /* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */ 5321 rc = spdk_bdev_set_opts(&bdev_opts); 5322 CU_ASSERT(rc == -1); 5323 5324 /* Case 3: Do not set valid large_buf_pool_size */ 5325 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 5326 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 5327 rc = spdk_bdev_set_opts(&bdev_opts); 5328 CU_ASSERT(rc == -1); 5329 5330 /* Case4: set valid large buf_pool_size */ 5331 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 5332 rc = spdk_bdev_set_opts(&bdev_opts); 5333 CU_ASSERT(rc == 0); 5334 5335 /* Case5: Set different valid value for small and large buf pool */ 5336 bdev_opts.large_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 5337 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 5338 rc = spdk_bdev_set_opts(&bdev_opts); 5339 CU_ASSERT(rc == 0); 5340 } 5341 5342 static uint64_t 5343 get_ns_time(void) 5344 { 5345 int rc; 5346 struct timespec ts; 5347 5348 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 5349 CU_ASSERT(rc == 0); 5350 return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 5351 } 5352 5353 static int 5354 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 5355 { 5356 int h1, h2; 5357 5358 if (bdev_name == NULL) { 5359 return -1; 5360 } else { 5361 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 5362 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 5363 5364 return spdk_max(h1, h2) + 1; 5365 } 5366 } 5367 5368 static void 5369 bdev_multi_allocation(void) 5370 { 5371 const int max_bdev_num = 1024 * 16; 5372 char name[max_bdev_num][16]; 5373 char noexist_name[] = "invalid_bdev"; 5374 struct spdk_bdev *bdev[max_bdev_num]; 5375 int i, j; 5376 uint64_t last_time; 5377 int bdev_num; 5378 int height; 5379 5380 for (j = 0; j < max_bdev_num; j++) { 5381 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 5382 } 5383 5384 for (i = 0; i < 16; i++) { 5385 last_time = get_ns_time(); 5386 bdev_num = 1024 * (i + 1); 5387 for (j = 0; j < bdev_num; j++) { 5388 bdev[j] = allocate_bdev(name[j]); 5389 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); 5390 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 5391 } 5392 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 5393 (get_ns_time() - last_time) / 1000 / 1000); 5394 for (j = 0; j < bdev_num; j++) { 5395 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 5396 } 5397 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 5398 5399 for (j = 0; j < bdev_num; j++) { 5400 free_bdev(bdev[j]); 5401 } 5402 for (j = 0; j < bdev_num; j++) { 5403 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 5404 } 5405 } 5406 } 5407 5408 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5409 5410 static int 5411 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5412 int array_size) 5413 { 5414 if (array_size > 0 && domains) { 5415 domains[0] = g_bdev_memory_domain; 5416 } 5417 5418 return 1; 5419 } 5420 5421 static void 5422 bdev_get_memory_domains(void) 5423 { 5424 struct spdk_bdev_fn_table fn_table = { 5425 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5426 }; 5427 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5428 struct spdk_memory_domain *domains[2] = {}; 5429 int rc; 5430 5431 /* bdev is NULL */ 5432 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5433 CU_ASSERT(rc == -EINVAL); 5434 5435 /* domains is NULL */ 5436 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5437 CU_ASSERT(rc == 1); 5438 5439 /* array size is 0 */ 5440 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5441 CU_ASSERT(rc == 1); 5442 5443 /* get_supported_dma_device_types op is set */ 5444 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5445 CU_ASSERT(rc == 1); 5446 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5447 5448 /* get_supported_dma_device_types op is not set */ 5449 fn_table.get_memory_domains = NULL; 5450 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5451 CU_ASSERT(rc == 0); 5452 } 5453 5454 static void 5455 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5456 { 5457 struct spdk_bdev *bdev; 5458 struct spdk_bdev_desc *desc = NULL; 5459 struct spdk_io_channel *io_ch; 5460 char io_buf[512]; 5461 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5462 struct ut_expected_io *expected_io; 5463 int rc; 5464 5465 spdk_bdev_initialize(bdev_init_cb, NULL); 5466 5467 bdev = allocate_bdev("bdev0"); 5468 bdev->md_interleave = false; 5469 bdev->md_len = 8; 5470 5471 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5472 CU_ASSERT(rc == 0); 5473 SPDK_CU_ASSERT_FATAL(desc != NULL); 5474 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5475 io_ch = spdk_bdev_get_io_channel(desc); 5476 CU_ASSERT(io_ch != NULL); 5477 5478 /* read */ 5479 g_io_done = false; 5480 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5481 if (ext_io_opts) { 5482 expected_io->md_buf = ext_io_opts->metadata; 5483 expected_io->ext_io_opts = ext_io_opts; 5484 } 5485 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5486 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5487 5488 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5489 5490 CU_ASSERT(rc == 0); 5491 CU_ASSERT(g_io_done == false); 5492 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5493 stub_complete_io(1); 5494 CU_ASSERT(g_io_done == true); 5495 5496 /* write */ 5497 g_io_done = false; 5498 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5499 if (ext_io_opts) { 5500 expected_io->md_buf = ext_io_opts->metadata; 5501 expected_io->ext_io_opts = ext_io_opts; 5502 } 5503 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5504 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5505 5506 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5507 5508 CU_ASSERT(rc == 0); 5509 CU_ASSERT(g_io_done == false); 5510 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5511 stub_complete_io(1); 5512 CU_ASSERT(g_io_done == true); 5513 5514 spdk_put_io_channel(io_ch); 5515 spdk_bdev_close(desc); 5516 free_bdev(bdev); 5517 spdk_bdev_finish(bdev_fini_cb, NULL); 5518 poll_threads(); 5519 5520 } 5521 5522 static void 5523 bdev_io_ext(void) 5524 { 5525 struct spdk_bdev_ext_io_opts ext_io_opts = { 5526 .metadata = (void *)0xFF000000, 5527 .size = sizeof(ext_io_opts) 5528 }; 5529 5530 _bdev_io_ext(&ext_io_opts); 5531 } 5532 5533 static void 5534 bdev_io_ext_no_opts(void) 5535 { 5536 _bdev_io_ext(NULL); 5537 } 5538 5539 static void 5540 bdev_io_ext_invalid_opts(void) 5541 { 5542 struct spdk_bdev *bdev; 5543 struct spdk_bdev_desc *desc = NULL; 5544 struct spdk_io_channel *io_ch; 5545 char io_buf[512]; 5546 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5547 struct spdk_bdev_ext_io_opts ext_io_opts = { 5548 .metadata = (void *)0xFF000000, 5549 .size = sizeof(ext_io_opts) 5550 }; 5551 int rc; 5552 5553 spdk_bdev_initialize(bdev_init_cb, NULL); 5554 5555 bdev = allocate_bdev("bdev0"); 5556 bdev->md_interleave = false; 5557 bdev->md_len = 8; 5558 5559 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5560 CU_ASSERT(rc == 0); 5561 SPDK_CU_ASSERT_FATAL(desc != NULL); 5562 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5563 io_ch = spdk_bdev_get_io_channel(desc); 5564 CU_ASSERT(io_ch != NULL); 5565 5566 /* Test invalid ext_opts size */ 5567 ext_io_opts.size = 0; 5568 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5569 CU_ASSERT(rc == -EINVAL); 5570 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5571 CU_ASSERT(rc == -EINVAL); 5572 5573 ext_io_opts.size = sizeof(ext_io_opts) * 2; 5574 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5575 CU_ASSERT(rc == -EINVAL); 5576 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5577 CU_ASSERT(rc == -EINVAL); 5578 5579 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5580 sizeof(ext_io_opts.metadata) - 1; 5581 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5582 CU_ASSERT(rc == -EINVAL); 5583 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5584 CU_ASSERT(rc == -EINVAL); 5585 5586 spdk_put_io_channel(io_ch); 5587 spdk_bdev_close(desc); 5588 free_bdev(bdev); 5589 spdk_bdev_finish(bdev_fini_cb, NULL); 5590 poll_threads(); 5591 } 5592 5593 static void 5594 bdev_io_ext_split(void) 5595 { 5596 struct spdk_bdev *bdev; 5597 struct spdk_bdev_desc *desc = NULL; 5598 struct spdk_io_channel *io_ch; 5599 char io_buf[512]; 5600 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5601 struct ut_expected_io *expected_io; 5602 struct spdk_bdev_ext_io_opts ext_io_opts = { 5603 .metadata = (void *)0xFF000000, 5604 .size = sizeof(ext_io_opts) 5605 }; 5606 int rc; 5607 5608 spdk_bdev_initialize(bdev_init_cb, NULL); 5609 5610 bdev = allocate_bdev("bdev0"); 5611 bdev->md_interleave = false; 5612 bdev->md_len = 8; 5613 5614 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5615 CU_ASSERT(rc == 0); 5616 SPDK_CU_ASSERT_FATAL(desc != NULL); 5617 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5618 io_ch = spdk_bdev_get_io_channel(desc); 5619 CU_ASSERT(io_ch != NULL); 5620 5621 /* Check that IO request with ext_opts and metadata is split correctly 5622 * Offset 14, length 8, payload 0xF000 5623 * Child - Offset 14, length 2, payload 0xF000 5624 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5625 */ 5626 bdev->optimal_io_boundary = 16; 5627 bdev->split_on_optimal_io_boundary = true; 5628 bdev->md_interleave = false; 5629 bdev->md_len = 8; 5630 5631 iov.iov_base = (void *)0xF000; 5632 iov.iov_len = 4096; 5633 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5634 ext_io_opts.metadata = (void *)0xFF000000; 5635 ext_io_opts.size = sizeof(ext_io_opts); 5636 g_io_done = false; 5637 5638 /* read */ 5639 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5640 expected_io->md_buf = ext_io_opts.metadata; 5641 expected_io->ext_io_opts = &ext_io_opts; 5642 expected_io->copy_opts = true; 5643 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5644 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5645 5646 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5647 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5648 expected_io->ext_io_opts = &ext_io_opts; 5649 expected_io->copy_opts = true; 5650 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5651 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5652 5653 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5654 CU_ASSERT(rc == 0); 5655 CU_ASSERT(g_io_done == false); 5656 5657 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5658 stub_complete_io(2); 5659 CU_ASSERT(g_io_done == true); 5660 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5661 5662 /* write */ 5663 g_io_done = false; 5664 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5665 expected_io->md_buf = ext_io_opts.metadata; 5666 expected_io->ext_io_opts = &ext_io_opts; 5667 expected_io->copy_opts = true; 5668 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5669 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5670 5671 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5672 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5673 expected_io->ext_io_opts = &ext_io_opts; 5674 expected_io->copy_opts = true; 5675 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5676 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5677 5678 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5679 CU_ASSERT(rc == 0); 5680 CU_ASSERT(g_io_done == false); 5681 5682 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5683 stub_complete_io(2); 5684 CU_ASSERT(g_io_done == true); 5685 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5686 5687 spdk_put_io_channel(io_ch); 5688 spdk_bdev_close(desc); 5689 free_bdev(bdev); 5690 spdk_bdev_finish(bdev_fini_cb, NULL); 5691 poll_threads(); 5692 } 5693 5694 static void 5695 bdev_io_ext_bounce_buffer(void) 5696 { 5697 struct spdk_bdev *bdev; 5698 struct spdk_bdev_desc *desc = NULL; 5699 struct spdk_io_channel *io_ch; 5700 char io_buf[512]; 5701 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5702 struct ut_expected_io *expected_io; 5703 struct spdk_bdev_ext_io_opts ext_io_opts = { 5704 .metadata = (void *)0xFF000000, 5705 .size = sizeof(ext_io_opts) 5706 }; 5707 int rc; 5708 5709 spdk_bdev_initialize(bdev_init_cb, NULL); 5710 5711 bdev = allocate_bdev("bdev0"); 5712 bdev->md_interleave = false; 5713 bdev->md_len = 8; 5714 5715 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5716 CU_ASSERT(rc == 0); 5717 SPDK_CU_ASSERT_FATAL(desc != NULL); 5718 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5719 io_ch = spdk_bdev_get_io_channel(desc); 5720 CU_ASSERT(io_ch != NULL); 5721 5722 /* Verify data pull/push 5723 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */ 5724 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 5725 5726 /* read */ 5727 g_io_done = false; 5728 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5729 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5730 expected_io->ext_io_opts = &ext_io_opts; 5731 expected_io->copy_opts = true; 5732 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5733 5734 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5735 5736 CU_ASSERT(rc == 0); 5737 CU_ASSERT(g_io_done == false); 5738 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5739 stub_complete_io(1); 5740 CU_ASSERT(g_memory_domain_push_data_called == true); 5741 CU_ASSERT(g_io_done == true); 5742 5743 /* write */ 5744 g_io_done = false; 5745 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5746 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5747 expected_io->ext_io_opts = &ext_io_opts; 5748 expected_io->copy_opts = true; 5749 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5750 5751 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5752 5753 CU_ASSERT(rc == 0); 5754 CU_ASSERT(g_memory_domain_pull_data_called == true); 5755 CU_ASSERT(g_io_done == false); 5756 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5757 stub_complete_io(1); 5758 CU_ASSERT(g_io_done == true); 5759 5760 spdk_put_io_channel(io_ch); 5761 spdk_bdev_close(desc); 5762 free_bdev(bdev); 5763 spdk_bdev_finish(bdev_fini_cb, NULL); 5764 poll_threads(); 5765 } 5766 5767 static void 5768 bdev_register_uuid_alias(void) 5769 { 5770 struct spdk_bdev *bdev, *second; 5771 char uuid[SPDK_UUID_STRING_LEN]; 5772 int rc; 5773 5774 spdk_bdev_initialize(bdev_init_cb, NULL); 5775 bdev = allocate_bdev("bdev0"); 5776 5777 /* Make sure an UUID was generated */ 5778 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))); 5779 5780 /* Check that an UUID alias was registered */ 5781 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5782 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5783 5784 /* Unregister the bdev */ 5785 spdk_bdev_unregister(bdev, NULL, NULL); 5786 poll_threads(); 5787 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5788 5789 /* Check the same, but this time register the bdev with non-zero UUID */ 5790 rc = spdk_bdev_register(bdev); 5791 CU_ASSERT_EQUAL(rc, 0); 5792 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5793 5794 /* Unregister the bdev */ 5795 spdk_bdev_unregister(bdev, NULL, NULL); 5796 poll_threads(); 5797 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5798 5799 /* Regiser the bdev using UUID as the name */ 5800 bdev->name = uuid; 5801 rc = spdk_bdev_register(bdev); 5802 CU_ASSERT_EQUAL(rc, 0); 5803 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5804 5805 /* Unregister the bdev */ 5806 spdk_bdev_unregister(bdev, NULL, NULL); 5807 poll_threads(); 5808 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5809 5810 /* Check that it's not possible to register two bdevs with the same UUIDs */ 5811 bdev->name = "bdev0"; 5812 second = allocate_bdev("bdev1"); 5813 spdk_uuid_copy(&bdev->uuid, &second->uuid); 5814 rc = spdk_bdev_register(bdev); 5815 CU_ASSERT_EQUAL(rc, -EEXIST); 5816 5817 /* Regenerate the UUID and re-check */ 5818 spdk_uuid_generate(&bdev->uuid); 5819 rc = spdk_bdev_register(bdev); 5820 CU_ASSERT_EQUAL(rc, 0); 5821 5822 /* And check that both bdevs can be retrieved through their UUIDs */ 5823 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5824 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5825 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 5826 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 5827 5828 free_bdev(second); 5829 free_bdev(bdev); 5830 spdk_bdev_finish(bdev_fini_cb, NULL); 5831 poll_threads(); 5832 } 5833 5834 static void 5835 bdev_unregister_by_name(void) 5836 { 5837 struct spdk_bdev *bdev; 5838 int rc; 5839 5840 bdev = allocate_bdev("bdev"); 5841 5842 g_event_type1 = 0xFF; 5843 g_unregister_arg = NULL; 5844 g_unregister_rc = -1; 5845 5846 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5847 CU_ASSERT(rc == -ENODEV); 5848 5849 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5850 CU_ASSERT(rc == -ENODEV); 5851 5852 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5853 CU_ASSERT(rc == 0); 5854 5855 /* Check that unregister callback is delayed */ 5856 CU_ASSERT(g_unregister_arg == NULL); 5857 CU_ASSERT(g_unregister_rc == -1); 5858 5859 poll_threads(); 5860 5861 /* Event callback shall not be issued because device was closed */ 5862 CU_ASSERT(g_event_type1 == 0xFF); 5863 /* Unregister callback is issued */ 5864 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 5865 CU_ASSERT(g_unregister_rc == 0); 5866 5867 free_bdev(bdev); 5868 } 5869 5870 static int 5871 count_bdevs(void *ctx, struct spdk_bdev *bdev) 5872 { 5873 int *count = ctx; 5874 5875 (*count)++; 5876 5877 return 0; 5878 } 5879 5880 static void 5881 for_each_bdev_test(void) 5882 { 5883 struct spdk_bdev *bdev[8]; 5884 int rc, count; 5885 5886 bdev[0] = allocate_bdev("bdev0"); 5887 5888 bdev[1] = allocate_bdev("bdev1"); 5889 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 5890 CU_ASSERT(rc == 0); 5891 5892 bdev[2] = allocate_bdev("bdev2"); 5893 5894 bdev[3] = allocate_bdev("bdev3"); 5895 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 5896 CU_ASSERT(rc == 0); 5897 5898 bdev[4] = allocate_bdev("bdev4"); 5899 5900 bdev[5] = allocate_bdev("bdev5"); 5901 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 5902 CU_ASSERT(rc == 0); 5903 5904 bdev[6] = allocate_bdev("bdev6"); 5905 5906 bdev[7] = allocate_bdev("bdev7"); 5907 5908 count = 0; 5909 rc = spdk_for_each_bdev(&count, count_bdevs); 5910 CU_ASSERT(rc == 0); 5911 CU_ASSERT(count == 8); 5912 5913 count = 0; 5914 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5915 CU_ASSERT(rc == 0); 5916 CU_ASSERT(count == 5); 5917 5918 free_bdev(bdev[0]); 5919 free_bdev(bdev[1]); 5920 free_bdev(bdev[2]); 5921 free_bdev(bdev[3]); 5922 free_bdev(bdev[4]); 5923 free_bdev(bdev[5]); 5924 free_bdev(bdev[6]); 5925 free_bdev(bdev[7]); 5926 } 5927 5928 static void 5929 bdev_seek_test(void) 5930 { 5931 struct spdk_bdev *bdev; 5932 struct spdk_bdev_desc *desc = NULL; 5933 struct spdk_io_channel *io_ch; 5934 int rc; 5935 5936 spdk_bdev_initialize(bdev_init_cb, NULL); 5937 poll_threads(); 5938 5939 bdev = allocate_bdev("bdev0"); 5940 5941 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5942 CU_ASSERT(rc == 0); 5943 poll_threads(); 5944 SPDK_CU_ASSERT_FATAL(desc != NULL); 5945 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5946 io_ch = spdk_bdev_get_io_channel(desc); 5947 CU_ASSERT(io_ch != NULL); 5948 5949 /* Seek data not supported */ 5950 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 5951 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 5952 CU_ASSERT(rc == 0); 5953 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5954 poll_threads(); 5955 CU_ASSERT(g_seek_offset == 0); 5956 5957 /* Seek hole not supported */ 5958 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 5959 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 5960 CU_ASSERT(rc == 0); 5961 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5962 poll_threads(); 5963 CU_ASSERT(g_seek_offset == UINT64_MAX); 5964 5965 /* Seek data supported */ 5966 g_seek_data_offset = 12345; 5967 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 5968 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 5969 CU_ASSERT(rc == 0); 5970 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5971 stub_complete_io(1); 5972 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5973 CU_ASSERT(g_seek_offset == 12345); 5974 5975 /* Seek hole supported */ 5976 g_seek_hole_offset = 67890; 5977 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 5978 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 5979 CU_ASSERT(rc == 0); 5980 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5981 stub_complete_io(1); 5982 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5983 CU_ASSERT(g_seek_offset == 67890); 5984 5985 spdk_put_io_channel(io_ch); 5986 spdk_bdev_close(desc); 5987 free_bdev(bdev); 5988 spdk_bdev_finish(bdev_fini_cb, NULL); 5989 poll_threads(); 5990 } 5991 5992 int 5993 main(int argc, char **argv) 5994 { 5995 CU_pSuite suite = NULL; 5996 unsigned int num_failures; 5997 5998 CU_set_error_action(CUEA_ABORT); 5999 CU_initialize_registry(); 6000 6001 suite = CU_add_suite("bdev", null_init, null_clean); 6002 6003 CU_ADD_TEST(suite, bytes_to_blocks_test); 6004 CU_ADD_TEST(suite, num_blocks_test); 6005 CU_ADD_TEST(suite, io_valid_test); 6006 CU_ADD_TEST(suite, open_write_test); 6007 CU_ADD_TEST(suite, claim_test); 6008 CU_ADD_TEST(suite, alias_add_del_test); 6009 CU_ADD_TEST(suite, get_device_stat_test); 6010 CU_ADD_TEST(suite, bdev_io_types_test); 6011 CU_ADD_TEST(suite, bdev_io_wait_test); 6012 CU_ADD_TEST(suite, bdev_io_spans_split_test); 6013 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 6014 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 6015 CU_ADD_TEST(suite, bdev_io_mix_split_test); 6016 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 6017 CU_ADD_TEST(suite, bdev_io_write_unit_split_test); 6018 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 6019 CU_ADD_TEST(suite, bdev_io_alignment); 6020 CU_ADD_TEST(suite, bdev_histograms); 6021 CU_ADD_TEST(suite, bdev_write_zeroes); 6022 CU_ADD_TEST(suite, bdev_compare_and_write); 6023 CU_ADD_TEST(suite, bdev_compare); 6024 CU_ADD_TEST(suite, bdev_compare_emulated); 6025 CU_ADD_TEST(suite, bdev_zcopy_write); 6026 CU_ADD_TEST(suite, bdev_zcopy_read); 6027 CU_ADD_TEST(suite, bdev_open_while_hotremove); 6028 CU_ADD_TEST(suite, bdev_close_while_hotremove); 6029 CU_ADD_TEST(suite, bdev_open_ext); 6030 CU_ADD_TEST(suite, bdev_open_ext_unregister); 6031 CU_ADD_TEST(suite, bdev_set_io_timeout); 6032 CU_ADD_TEST(suite, bdev_set_qd_sampling); 6033 CU_ADD_TEST(suite, lba_range_overlap); 6034 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 6035 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 6036 CU_ADD_TEST(suite, lock_lba_range_overlapped); 6037 CU_ADD_TEST(suite, bdev_io_abort); 6038 CU_ADD_TEST(suite, bdev_unmap); 6039 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 6040 CU_ADD_TEST(suite, bdev_set_options_test); 6041 CU_ADD_TEST(suite, bdev_multi_allocation); 6042 CU_ADD_TEST(suite, bdev_get_memory_domains); 6043 CU_ADD_TEST(suite, bdev_io_ext); 6044 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 6045 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 6046 CU_ADD_TEST(suite, bdev_io_ext_split); 6047 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 6048 CU_ADD_TEST(suite, bdev_register_uuid_alias); 6049 CU_ADD_TEST(suite, bdev_unregister_by_name); 6050 CU_ADD_TEST(suite, for_each_bdev_test); 6051 CU_ADD_TEST(suite, bdev_seek_test); 6052 6053 allocate_cores(1); 6054 allocate_threads(1); 6055 set_thread(0); 6056 6057 CU_basic_set_mode(CU_BRM_VERBOSE); 6058 CU_basic_run_tests(); 6059 num_failures = CU_get_number_of_failures(); 6060 CU_cleanup_registry(); 6061 6062 free_threads(); 6063 free_cores(); 6064 6065 return num_failures; 6066 } 6067