/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			src_offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	struct spdk_bdev_ext_io_opts	*ext_io_opts;
	bool				copy_opts;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static struct ut_expected_io *
ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->src_offset = src_offset;
	expected_io->length = length;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}
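/*
 * A note on usage (this summarizes the pattern the tests below already
 * follow; it adds no new functionality): a test registers what it expects
 * the module to receive, e.g.
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *
 * stub_submit_request() below then dequeues the head of expected_io and
 * asserts that the submitted bdev_io matches it field by field.
 */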
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
		if (bdev_io->u.bdev.ext_opts) {
			CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata);
		}
	}

	if (expected_io->copy_opts) {
		if (expected_io->ext_io_opts) {
			/* opts are not NULL so they should have been copied */
			CU_ASSERT(expected_io->ext_io_opts != bdev_io->u.bdev.ext_opts);
			CU_ASSERT(bdev_io->u.bdev.ext_opts == &bdev_io->internal.ext_opts_copy);
			/* internal opts always points to the opts passed by the caller */
			CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
		} else {
			/* passed opts was NULL so we expect bdev_io opts to be NULL */
			CU_ASSERT(bdev_io->u.bdev.ext_opts == NULL);
		}
	} else {
		/* opts were not copied so they should be equal */
		CU_ASSERT(expected_io->ext_io_opts == bdev_io->u.bdev.ext_opts);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
	if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
		CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
	}

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just
		 * return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = &bdev_io->internal.orig_iovs[i];
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}
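/*
 * The two functions below cover the deferred-buffer path: submission is
 * postponed until spdk_bdev_io_get_buf() provides a data buffer, and the
 * I/O is only forwarded to stub_submit_request() from the resulting callback.
 */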
static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA]		= true,
	[SPDK_BDEV_IO_TYPE_COPY]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;
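/*
 * bdev_ut_if is forward-declared above so that bdev_ut_module_init() can
 * reference it. Because the module sets .async_init = true, the bdev layer
 * does not consider initialization finished until
 * spdk_bdev_module_init_done() is called.
 */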
static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	spdk_uuid_generate(&bdev->uuid);

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}
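/*
 * Open callbacks used by the tests below: bdev_open_cb1 and bdev_open_cb2
 * record the event and close their descriptor on SPDK_BDEV_EVENT_REMOVE,
 * while bdev_open_cb3 and bdev_open_cb4 only record the event.
 */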
static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.  This
	 * models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+    +---+     +   +---+---+
	 *        |       |     \   |   /     \      |
	 *      bdev0   bdev1    bdev2        bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
}
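/*
 * bytes_to_blocks_test below exercises bdev_bytes_to_blocks() with both a
 * power-of-two block size (512) and a non-power-of-two block size (100),
 * suggesting the conversion cannot rely on power-of-two shifts alone;
 * offsets and lengths that are not multiples of the block size must be
 * rejected.
 */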
static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	poll_threads();
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}
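/*
 * io_valid_test below checks bdev_io_valid_blocks() range validation,
 * including the offset + length overflow case near the top of the
 * uint64_t range.
 */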
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_spin_init(&bdev.internal.spinlock);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	spdk_spin_destroy(&bdev.internal.spinlock);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * The alias is identical to the name, so it can not be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Try to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try to remove the name instead of an alias: this one should fail, the name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}
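/*
 * Generic completion callback. For the start phase of a zcopy I/O the
 * bdev_io must stay alive until the matching commit/close, so it is stashed
 * in g_zcopy_bdev_io instead of being freed.
 */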
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}

static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* COPY is not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);
	rc = spdk_bdev_copy_blocks(desc, io_ch, 128, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
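/*
 * With bdev_io_pool_size = 4, the fifth read below fails with -ENOMEM.
 * The test then queues two io_wait entries and verifies that each one is
 * resubmitted (via io_wait_cb) as pool slots are freed by completions.
 */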
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}
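/*
 * bdev_io_should_split() decides whether an I/O must be split based on
 * optimal_io_boundary, max_segment_size, max_num_segments, and (for writes)
 * write_unit_size; the cases below toggle each limit in turn.
 */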
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max size set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_segment_size or max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to the write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}
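/*
 * In the boundary split tests below the bdev is configured with separate
 * metadata (md_interleave = false, md_len = 8), so each child's expected
 * md_buf advances by (blocks already covered) * md_len, e.g. md_buf + 2 * 8
 * for a child that starts two blocks into the parent I/O.
 */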
static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case, the length of
	 * the rest of the iovec array within an I/O boundary is a multiple of the block size.
	 */

	/* Fill the iovec array for exactly one boundary. The iovec count for this boundary
	 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case the child request
	 * offset should be rewound to the last aligned offset and the I/O should succeed
	 * without error.
	 */
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
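	/* As a sanity check on the expectations below: with a 128-block boundary,
	 * the 543-block read splits into children of 126, 2, 128, 128, 128, 30
	 * and 1 blocks, which sum to 543.
	 */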
1606 */ 1607 bdev->optimal_io_boundary = 128; 1608 g_io_done = false; 1609 g_io_status = 0; 1610 1611 for (i = 0; i < 31; i++) { 1612 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1613 iov[i].iov_len = 1024; 1614 } 1615 iov[31].iov_base = (void *)0xFEED1F00000; 1616 iov[31].iov_len = 32768; 1617 iov[32].iov_base = (void *)0xFEED2000000; 1618 iov[32].iov_len = 160; 1619 iov[33].iov_base = (void *)0xFEED2100000; 1620 iov[33].iov_len = 4096; 1621 iov[34].iov_base = (void *)0xFEED2200000; 1622 iov[34].iov_len = 4096; 1623 iov[35].iov_base = (void *)0xFEED2300000; 1624 iov[35].iov_len = 4096; 1625 iov[36].iov_base = (void *)0xFEED2400000; 1626 iov[36].iov_len = 4096; 1627 iov[37].iov_base = (void *)0xFEED2500000; 1628 iov[37].iov_len = 4096; 1629 iov[38].iov_base = (void *)0xFEED2600000; 1630 iov[38].iov_len = 4096; 1631 iov[39].iov_base = (void *)0xFEED2700000; 1632 iov[39].iov_len = 4096; 1633 iov[40].iov_base = (void *)0xFEED2800000; 1634 iov[40].iov_len = 4096; 1635 iov[41].iov_base = (void *)0xFEED2900000; 1636 iov[41].iov_len = 4096; 1637 iov[42].iov_base = (void *)0xFEED2A00000; 1638 iov[42].iov_len = 4096; 1639 iov[43].iov_base = (void *)0xFEED2B00000; 1640 iov[43].iov_len = 12288; 1641 iov[44].iov_base = (void *)0xFEED2C00000; 1642 iov[44].iov_len = 8192; 1643 iov[45].iov_base = (void *)0xFEED2F00000; 1644 iov[45].iov_len = 4096; 1645 iov[46].iov_base = (void *)0xFEED3000000; 1646 iov[46].iov_len = 4096; 1647 iov[47].iov_base = (void *)0xFEED3100000; 1648 iov[47].iov_len = 4096; 1649 iov[48].iov_base = (void *)0xFEED3200000; 1650 iov[48].iov_len = 24576; 1651 iov[49].iov_base = (void *)0xFEED3300000; 1652 iov[49].iov_len = 16384; 1653 iov[50].iov_base = (void *)0xFEED3400000; 1654 iov[50].iov_len = 12288; 1655 iov[51].iov_base = (void *)0xFEED3500000; 1656 iov[51].iov_len = 4096; 1657 iov[52].iov_base = (void *)0xFEED3600000; 1658 iov[52].iov_len = 4096; 1659 iov[53].iov_base = (void *)0xFEED3700000; 1660 iov[53].iov_len = 4096; 1661 iov[54].iov_base = (void *)0xFEED3800000; 1662 iov[54].iov_len = 28672; 1663 iov[55].iov_base = (void *)0xFEED3900000; 1664 iov[55].iov_len = 20480; 1665 iov[56].iov_base = (void *)0xFEED3A00000; 1666 iov[56].iov_len = 4096; 1667 iov[57].iov_base = (void *)0xFEED3B00000; 1668 iov[57].iov_len = 12288; 1669 iov[58].iov_base = (void *)0xFEED3C00000; 1670 iov[58].iov_len = 4096; 1671 iov[59].iov_base = (void *)0xFEED3D00000; 1672 iov[59].iov_len = 4096; 1673 iov[60].iov_base = (void *)0xFEED3E00000; 1674 iov[60].iov_len = 352; 1675 1676 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1677 * of child iovs, 1678 */ 1679 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1680 expected_io->md_buf = md_buf; 1681 for (i = 0; i < 32; i++) { 1682 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1683 } 1684 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1685 1686 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1687 * split by the IO boundary requirement. 
1688 */ 1689 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2); 1690 expected_io->md_buf = md_buf + 126 * 8; 1691 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len); 1692 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864); 1693 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1694 1695 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to 1696 * the first 864 bytes of iov[46] split by the IO boundary requirement. 1697 */ 1698 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14); 1699 expected_io->md_buf = md_buf + 128 * 8; 1700 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864), 1701 iov[33].iov_len - 864); 1702 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len); 1703 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len); 1704 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len); 1705 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len); 1706 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len); 1707 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len); 1708 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len); 1709 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len); 1710 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len); 1711 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len); 1712 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len); 1713 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len); 1714 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864); 1715 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1716 1717 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the 1718 * first 864 bytes of iov[52] split by the IO boundary requirement. 1719 */ 1720 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7); 1721 expected_io->md_buf = md_buf + 256 * 8; 1722 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864), 1723 iov[46].iov_len - 864); 1724 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len); 1725 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len); 1726 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len); 1727 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len); 1728 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len); 1729 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864); 1730 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1731 1732 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to 1733 * the first 4096 bytes of iov[57] split by the IO boundary requirement. 
1734 */
1735 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
1736 expected_io->md_buf = md_buf + 384 * 8;
1737 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
1738 iov[52].iov_len - 864);
1739 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
1740 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
1741 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
1742 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
1743 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
1744 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1745
1746 /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
1747 * to the first 3936 bytes of iov[59] split by the capacity of child iovs.
1748 */
1749 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
1750 expected_io->md_buf = md_buf + 512 * 8;
1751 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
1752 iov[57].iov_len - 4960);
1753 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
1754 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
1755 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1756
1757 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
1758 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
1759 expected_io->md_buf = md_buf + 542 * 8;
1760 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
1761 iov[59].iov_len - 3936);
1762 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
1763 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1764
1765 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
1766 0, 543, io_done, NULL);
1767 CU_ASSERT(rc == 0);
1768 CU_ASSERT(g_io_done == false);
1769
1770 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1771 stub_complete_io(1);
1772 CU_ASSERT(g_io_done == false);
1773
1774 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1775 stub_complete_io(5);
1776 CU_ASSERT(g_io_done == false);
1777
1778 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1779 stub_complete_io(1);
1780 CU_ASSERT(g_io_done == true);
1781 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1782 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1783
1784 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
1785 * split, so test that.
1786 */
1787 bdev->optimal_io_boundary = 15;
1788 g_io_done = false;
1789 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1790 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1791
1792 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1793 CU_ASSERT(rc == 0);
1794 CU_ASSERT(g_io_done == false);
1795 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1796 stub_complete_io(1);
1797 CU_ASSERT(g_io_done == true);
1798
1799 /* Test an UNMAP. This should also not be split.
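 * The 2-block unmap at offset 15 crosses the boundary at block 16, but, as
 * the single expected_io below asserts, it is still issued as one request.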
*/
1800 bdev->optimal_io_boundary = 16;
1801 g_io_done = false;
1802 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1803 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1804
1805 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1806 CU_ASSERT(rc == 0);
1807 CU_ASSERT(g_io_done == false);
1808 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1809 stub_complete_io(1);
1810 CU_ASSERT(g_io_done == true);
1811
1812 /* Test a FLUSH. This should also not be split. */
1813 bdev->optimal_io_boundary = 16;
1814 g_io_done = false;
1815 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1816 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1817
1818 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1819 CU_ASSERT(rc == 0);
1820 CU_ASSERT(g_io_done == false);
1821 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1822 stub_complete_io(1);
1823 CU_ASSERT(g_io_done == true);
1824
1825 /* Test a COPY. This should also not be split. */
1826 bdev->optimal_io_boundary = 15;
1827 g_io_done = false;
1828 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
1829 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1830
1831 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
1832 CU_ASSERT(rc == 0);
1833 CU_ASSERT(g_io_done == false);
1834 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1835 stub_complete_io(1);
1836 CU_ASSERT(g_io_done == true);
1837
1838 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1839
1840 /* Child requests return an error status */
1841 bdev->optimal_io_boundary = 16;
1842 iov[0].iov_base = (void *)0x10000;
1843 iov[0].iov_len = 512 * 64;
1844 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1845 g_io_done = false;
1846 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1847
1848 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1849 CU_ASSERT(rc == 0);
1850 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1851 stub_complete_io(4);
1852 CU_ASSERT(g_io_done == false);
1853 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1854 stub_complete_io(1);
1855 CU_ASSERT(g_io_done == true);
1856 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1857
1858 /* Test that a multi-vector command which needs splitting is terminated with
1859 * failure, without continuing the splitting process, when one of its child I/Os fails.
1860 * The multi-vector command is the same as above: it needs to be split by the
1861 * optimal IO boundary and then split further due to the capacity of child iovs.
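 * With g_io_exp_status set to FAILED, the parent is expected to complete with
 * an error as soon as its first (and only submitted) child completes, without
 * the remaining splits ever being issued.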
1862 */
1863 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1864 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1865 iov[i].iov_len = 512;
1866 }
1867 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1868 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1869
1870 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1871 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1872
1873 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1874 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1875
1876 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1877
1878 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1879 g_io_done = false;
1880 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1881
1882 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
1883 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1884 CU_ASSERT(rc == 0);
1885 CU_ASSERT(g_io_done == false);
1886
1887 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1888 stub_complete_io(1);
1889 CU_ASSERT(g_io_done == true);
1890 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1891
1892 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1893
1894 /* For this test we will create the following conditions to hit the code path where
1895 * we are trying to send an IO following a split that has no iovs because we had to
1896 * trim them for alignment reasons.
1897 *
1898 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1899 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1900 * position 30 and overshoot by 0x2e.
1901 * - That means we'll send the IO and loop back to pick up the remaining bytes at
1902 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e
1903 * which eliminates that vector, so we just send the first split IO with 31 vectors
1904 * and let the completion pick up the last 2 vectors.
1905 */
1906 bdev->optimal_io_boundary = 32;
1907 bdev->split_on_optimal_io_boundary = true;
1908 g_io_done = false;
1909
1910 /* Init all parent IOVs to 0x212 */
1911 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1912 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1913 iov[i].iov_len = 0x212;
1914 }
1915
1916 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1917 SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1918 /* expect 0-29 to be 1:1 with the parent iov */
1919 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1920 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1921 }
1922
1923 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment
1924 * where 0x2e is the amount we overshot the 16K boundary
1925 */
1926 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
1927 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
1928 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1929
1930 /* The 2nd child IO will have 2 remaining vectors: one that picks up where the
1931 * shortened vector left off, taking us to the next boundary, and then a final one
1932 * to get us to 0x4200 bytes for the IO.
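 * (check: the 1st child carries 30 * 0x212 + 0x1e4 = 0x4000 bytes and the 2nd
 * carries 0x2e + 0x1d2 = 0x200 bytes, 0x4200 in total)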
1933 */
1934 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
1935 SPDK_BDEV_IO_NUM_CHILD_IOV, 2);
1936 /* position 30 picks up the remaining bytes to the next boundary */
1937 ut_expected_io_set_iov(expected_io, 0,
1938 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
1939
1940 /* position 31 picks up the rest of the transfer to get us to 0x4200 */
1941 ut_expected_io_set_iov(expected_io, 1,
1942 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
1943 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1944
1945 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
1946 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1947 CU_ASSERT(rc == 0);
1948 CU_ASSERT(g_io_done == false);
1949
1950 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1951 stub_complete_io(1);
1952 CU_ASSERT(g_io_done == false);
1953
1954 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1955 stub_complete_io(1);
1956 CU_ASSERT(g_io_done == true);
1957 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1958
1959 spdk_put_io_channel(io_ch);
1960 spdk_bdev_close(desc);
1961 free_bdev(bdev);
1962 ut_fini_bdev();
1963 }
1964
1965 static void
1966 bdev_io_max_size_and_segment_split_test(void)
1967 {
1968 struct spdk_bdev *bdev;
1969 struct spdk_bdev_desc *desc = NULL;
1970 struct spdk_io_channel *io_ch;
1971 struct spdk_bdev_opts bdev_opts = {};
1972 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
1973 struct ut_expected_io *expected_io;
1974 uint64_t i;
1975 int rc;
1976
1977 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
1978 bdev_opts.bdev_io_pool_size = 512;
1979 bdev_opts.bdev_io_cache_size = 64;
1980 bdev_opts.opts_size = sizeof(bdev_opts);
1981 ut_init_bdev(&bdev_opts);
1982
1983 bdev = allocate_bdev("bdev0");
1984
1985 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
1986 CU_ASSERT(rc == 0);
1987 SPDK_CU_ASSERT_FATAL(desc != NULL);
1988 io_ch = spdk_bdev_get_io_channel(desc);
1989 CU_ASSERT(io_ch != NULL);
1990
1991 bdev->split_on_optimal_io_boundary = false;
1992 bdev->optimal_io_boundary = 0;
1993
1994 /* Case 0: max_num_segments == 0, but the segment size 2 * 512 exceeds
1995 * max_segment_size (512).
1996 */
1997 bdev->max_segment_size = 512;
1998 bdev->max_num_segments = 0;
1999 g_io_done = false;
2000
2001 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2002 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2003 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2004 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2005
2006 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2007 CU_ASSERT(rc == 0);
2008 CU_ASSERT(g_io_done == false);
2009
2010 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2011 stub_complete_io(1);
2012 CU_ASSERT(g_io_done == true);
2013 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2014
2015 /* Case 1: max_segment_size == 0, but the iov count (2) exceeds
2016 * max_num_segments (1).
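 * With max_num_segments = 1 each parent iov becomes its own child IO: one of
 * 1 block at offset 14 and one of 8 blocks at offset 15, as the expected_io
 * entries below show.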
2017 */ 2018 bdev->max_segment_size = 0; 2019 bdev->max_num_segments = 1; 2020 g_io_done = false; 2021 2022 iov[0].iov_base = (void *)0x10000; 2023 iov[0].iov_len = 512; 2024 iov[1].iov_base = (void *)0x20000; 2025 iov[1].iov_len = 8 * 512; 2026 2027 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2028 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2029 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2030 2031 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2032 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2033 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2034 2035 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2036 CU_ASSERT(rc == 0); 2037 CU_ASSERT(g_io_done == false); 2038 2039 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2040 stub_complete_io(2); 2041 CU_ASSERT(g_io_done == true); 2042 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2043 2044 /* Test that a non-vector command is split correctly. 2045 * Set up the expected values before calling spdk_bdev_read_blocks 2046 */ 2047 bdev->max_segment_size = 512; 2048 bdev->max_num_segments = 1; 2049 g_io_done = false; 2050 2051 /* Child IO 0 */ 2052 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2053 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2054 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2055 2056 /* Child IO 1 */ 2057 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2058 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2059 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2060 2061 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2062 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2063 CU_ASSERT(rc == 0); 2064 CU_ASSERT(g_io_done == false); 2065 2066 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2067 stub_complete_io(2); 2068 CU_ASSERT(g_io_done == true); 2069 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2070 2071 /* Now set up a more complex, multi-vector command that needs to be split, 2072 * including splitting iovecs. 
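 * With max_segment_size = 1024 and max_num_segments = 1, every child IO is a
 * single 1024-byte segment, so the 12-block write below yields 6 children of
 * 2 blocks each.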
2073 */
2074 bdev->max_segment_size = 2 * 512;
2075 bdev->max_num_segments = 1;
2076 g_io_done = false;
2077
2078 iov[0].iov_base = (void *)0x10000;
2079 iov[0].iov_len = 2 * 512;
2080 iov[1].iov_base = (void *)0x20000;
2081 iov[1].iov_len = 4 * 512;
2082 iov[2].iov_base = (void *)0x30000;
2083 iov[2].iov_len = 6 * 512;
2084
2085 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2086 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
2087 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2088
2089 /* iov[1] is split into 2 segments, each issued as its own child IO */
2090 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2091 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
2092 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2093
2094 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
2095 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
2096 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2097
2098 /* iov[2] is split into 3 segments, each issued as its own child IO */
2099 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
2100 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
2101 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2102
2103 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
2104 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
2105 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2106
2107 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
2108 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
2109 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2110
2111 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
2112 CU_ASSERT(rc == 0);
2113 CU_ASSERT(g_io_done == false);
2114
2115 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
2116 stub_complete_io(6);
2117 CU_ASSERT(g_io_done == true);
2118 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2119
2120 /* Test a multi-vector command that needs to be split by max_segment_size and
2121 * then split further due to the capacity of the parent IO's child iovs.
2122 */
2123 bdev->max_segment_size = 512;
2124 bdev->max_num_segments = 1;
2125 g_io_done = false;
2126
2127 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2128 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2129 iov[i].iov_len = 512 * 2;
2130 }
2131
2132 /* Each input iov is split into 2 single-block entries; half of the input
2133 * iovs is enough to fill all child iov entries of a single split round.
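 * (32 parent iovs * 1024 bytes = 64 blocks = 64 single-block children,
 * submitted in two batches of SPDK_BDEV_IO_NUM_CHILD_IOV, as the outstanding
 * counts below assert)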
2134 */
2135 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
2136 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
2137 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2138 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2139
2140 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
2141 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2142 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2143 }
2144
2145 /* The remaining iovs are split in the second round */
2146 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2147 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
2148 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2149 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2150
2151 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
2152 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2153 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2154 }
2155
2156 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2157 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
2158 CU_ASSERT(rc == 0);
2159 CU_ASSERT(g_io_done == false);
2160
2161 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2162 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2163 CU_ASSERT(g_io_done == false);
2164
2165 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2166 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2167 CU_ASSERT(g_io_done == true);
2168 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2169
2170 /* An error case: a child IO created by the split
2171 * is not a multiple of the block size,
2172 * so the request exits with an error.
2173 */
2174 bdev->max_segment_size = 512;
2175 bdev->max_num_segments = 1;
2176 g_io_done = false;
2177
2178 iov[0].iov_base = (void *)0x10000;
2179 iov[0].iov_len = 512 + 256;
2180 iov[1].iov_base = (void *)0x20000;
2181 iov[1].iov_len = 256;
2182
2183 /* iov[0] is split into 512 and 256 bytes. The 256-byte remainder
2184 * is less than a block, and the next round of splitting finds that
2185 * the first child IO it would build is smaller than
2186 * the block size, so the request exits with an error.
2187 */
2188 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2189 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2190 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2191
2192 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2193 CU_ASSERT(rc == 0);
2194 CU_ASSERT(g_io_done == false);
2195
2196 /* First child IO is OK */
2197 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2198 stub_complete_io(1);
2199 CU_ASSERT(g_io_done == true);
2200 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2201
2202 /* error exit */
2203 stub_complete_io(1);
2204 CU_ASSERT(g_io_done == true);
2205 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2206 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2207
2208 /* Test a multi-vector command that needs to be split by max_segment_size and
2209 * then split further due to the capacity of child iovs.
2210 *
2211 * In this case, the last two iovs need to be split, but that would exceed the
2212 * capacity of child iovs, so they must wait until the first batch has completed.
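 * (30 single-block iovs plus 2 two-block iovs = 34 blocks; splitting the last
 * two iovs requires 34 child entries, 2 more than the 32 available, hence the
 * second round)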
2213 */
2214 bdev->max_segment_size = 512;
2215 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2216 g_io_done = false;
2217
2218 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2219 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2220 iov[i].iov_len = 512;
2221 }
2222 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2223 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2224 iov[i].iov_len = 512 * 2;
2225 }
2226
2227 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2228 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
2229 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
2230 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2231 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2232 }
2233 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
2234 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2235 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2236 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2237
2238 /* The child iov entries exceed the parent IO's capacity, so the rest is split in the next round */
2239 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
2240 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2241 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2242 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2243
2244 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2245 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2246 CU_ASSERT(rc == 0);
2247 CU_ASSERT(g_io_done == false);
2248
2249 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2250 stub_complete_io(1);
2251 CU_ASSERT(g_io_done == false);
2252
2253 /* Next round */
2254 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2255 stub_complete_io(1);
2256 CU_ASSERT(g_io_done == true);
2257 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2258
2259 /* This case is similar to the previous one, but the last few child iov
2260 * entries do not add up to a full block, so they cannot be put into this
2261 * IO and must wait for the next one.
2262 */
2263 bdev->max_segment_size = 512;
2264 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2265 g_io_done = false;
2266
2267 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2268 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2269 iov[i].iov_len = 512;
2270 }
2271
2272 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2273 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2274 iov[i].iov_len = 128;
2275 }
2276
2277 /* The first child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2278 * because the remaining 2 iovs do not add up to a full block.
2279 */
2280 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2281 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
2282 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2283 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2284 }
2285 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2286
2287 /* The second child IO waits until the first child IO completes, because
2288 * their combined iovcnt exceeds the child iovcnt of the parent IO, spanning iovs
2289 * SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2.
2290 */
2291 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2292 1, 4);
2293 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2294 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2295 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2296 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2297 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2298
2299 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2300 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2301 CU_ASSERT(rc == 0);
2302 CU_ASSERT(g_io_done == false);
2303
2304 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2305 stub_complete_io(1);
2306 CU_ASSERT(g_io_done == false);
2307
2308 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2309 stub_complete_io(1);
2310 CU_ASSERT(g_io_done == true);
2311 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2312
2313 /* A very complicated case. Each sg entry exceeds max_segment_size and
2314 * needs to be split; each child IO must be a multiple of blocklen;
2315 * and the child iovcnt exceeds the parent iovcnt.
2316 */
2317 bdev->max_segment_size = 512 + 128;
2318 bdev->max_num_segments = 3;
2319 g_io_done = false;
2320
2321 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2322 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2323 iov[i].iov_len = 512 + 256;
2324 }
2325
2326 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2327 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2328 iov[i].iov_len = 512 + 128;
2329 }
2330
2331 /* Each for() round uses 9 child iov entries (3 * 9 = 27 in total),
2332 * consumes 4 parent IO iov entries and 6 blocks,
2333 * and generates 3 child IOs (9 in total).
2334 */
2335 for (i = 0; i < 3; i++) {
2336 uint32_t j = i * 4;
2337 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2338 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2339 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2340 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2341 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2342
2343 /* Each child IO must be a multiple of blocklen, so iov[j + 2]
2344 * must be split. If the third entry were also added,
2345 * a multiple of blocklen could not be guaranteed, yet it still
2346 * occupies one iov entry of the parent's child iov.
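 * Per-round byte check: 640 + 128 + 256 = 1024 (2 blocks), then
 * 512 + 512 = 1024 (2 blocks), then 256 + 640 + 128 = 1024 (2 blocks),
 * i.e. 6 blocks per round.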
2347 */
2348 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2349 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2350 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2351 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2352
2353 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2354 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2355 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2356 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2357 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2358 }
2359
2360 /* Child iov position at 27, the 10th child IO:
2361 * iov entry index is 3 * 4 and block offset is 3 * 6
2362 */
2363 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2364 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2365 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2366 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2367 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2368
2369 /* Child iov position at 30, the 11th child IO */
2370 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2371 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2372 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2373 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2374
2375 /* The 2nd split round and iovpos is 0, the 12th child IO */
2376 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2377 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2378 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2379 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2380 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2381
2382 /* Generate 9 more child IOs, consuming 27 child iov entries.
2383 * Consume 4 parent IO iov entries per for() round and 6 blocks.
2384 * Parent IO iov index starts from 16 and block offset starts from 24.
2385 */
2386 for (i = 0; i < 3; i++) {
2387 uint32_t j = i * 4 + 16;
2388 uint32_t offset = i * 6 + 24;
2389 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2390 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2391 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2392 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2393 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2394
2395 /* Each child IO must be a multiple of blocklen, so iov[j + 2]
2396 * must be split. If the third entry were also added,
2397 * a multiple of blocklen could not be guaranteed, yet it still
2398 * occupies one iov entry of the parent's child iov.
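 * (this second loop mirrors the first one, shifted by 16 parent iov entries
 * and 24 blocks)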
2399 */
2400 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2401 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2402 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2403 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2404
2405 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2406 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2407 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2408 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2409 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2410 }
2411
2412 /* The 22nd child IO, child iov position at 30 */
2413 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2414 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2415 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2416
2417 /* The third round */
2418 /* Here is the 23rd child IO and the child iovpos is 0 */
2419 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2420 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2421 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2422 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2423 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2424
2425 /* The 24th child IO */
2426 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2427 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2428 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2429 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2430 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2431
2432 /* The 25th child IO */
2433 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2434 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2435 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2436 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2437
2438 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2439 50, io_done, NULL);
2440 CU_ASSERT(rc == 0);
2441 CU_ASSERT(g_io_done == false);
2442
2443 /* The parent IO supports up to 32 child iovs, so at most
2444 * 11 child IOs can be split at a time, and the
2445 * splitting continues after the first batch is over.
2446 */
2447 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2448 stub_complete_io(11);
2449 CU_ASSERT(g_io_done == false);
2450
2451 /* The 2nd round */
2452 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2453 stub_complete_io(11);
2454 CU_ASSERT(g_io_done == false);
2455
2456 /* The last round */
2457 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2458 stub_complete_io(3);
2459 CU_ASSERT(g_io_done == true);
2460 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2461
2462 /* Test a WRITE_ZEROES. This should also not be split.
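 * (with max_segment_size = 512 and max_num_segments = 1 a 36-block write
 * would otherwise be split, but write_zeroes is passed through whole, as the
 * single expected_io below asserts)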
*/
2463 bdev->max_segment_size = 512;
2464 bdev->max_num_segments = 1;
2465 g_io_done = false;
2466
2467 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
2468 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2469
2470 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
2471 CU_ASSERT(rc == 0);
2472 CU_ASSERT(g_io_done == false);
2473 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2474 stub_complete_io(1);
2475 CU_ASSERT(g_io_done == true);
2476
2477 /* Test an UNMAP. This should also not be split. */
2478 g_io_done = false;
2479
2480 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
2481 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2482
2483 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
2484 CU_ASSERT(rc == 0);
2485 CU_ASSERT(g_io_done == false);
2486 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2487 stub_complete_io(1);
2488 CU_ASSERT(g_io_done == true);
2489
2490 /* Test a FLUSH. This should also not be split. */
2491 g_io_done = false;
2492
2493 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
2494 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2495
2496 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
2497 CU_ASSERT(rc == 0);
2498 CU_ASSERT(g_io_done == false);
2499 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2500 stub_complete_io(1);
2501 CU_ASSERT(g_io_done == true);
2502
2503 /* Test a COPY. This should also not be split. */
2504 g_io_done = false;
2505
2506 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
2507 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2508
2509 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
2510 CU_ASSERT(rc == 0);
2511 CU_ASSERT(g_io_done == false);
2512 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2513 stub_complete_io(1);
2514 CU_ASSERT(g_io_done == true);
2515
2516 spdk_put_io_channel(io_ch);
2517 spdk_bdev_close(desc);
2518 free_bdev(bdev);
2519 ut_fini_bdev();
2520 }
2521
2522 static void
2523 bdev_io_mix_split_test(void)
2524 {
2525 struct spdk_bdev *bdev;
2526 struct spdk_bdev_desc *desc = NULL;
2527 struct spdk_io_channel *io_ch;
2528 struct spdk_bdev_opts bdev_opts = {};
2529 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2530 struct ut_expected_io *expected_io;
2531 uint64_t i;
2532 int rc;
2533
2534 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2535 bdev_opts.bdev_io_pool_size = 512;
2536 bdev_opts.bdev_io_cache_size = 64;
2537 ut_init_bdev(&bdev_opts);
2538
2539 bdev = allocate_bdev("bdev0");
2540
2541 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2542 CU_ASSERT(rc == 0);
2543 SPDK_CU_ASSERT_FATAL(desc != NULL);
2544 io_ch = spdk_bdev_get_io_channel(desc);
2545 CU_ASSERT(io_ch != NULL);
2546
2547 /* First case optimal_io_boundary == max_segment_size * max_num_segments */
2548 bdev->split_on_optimal_io_boundary = true;
2549 bdev->optimal_io_boundary = 16;
2550
2551 bdev->max_segment_size = 512;
2552 bdev->max_num_segments = 16;
2553 g_io_done = false;
2554
2555 /* An IO crossing the IO boundary requires a split.
2556 * Total 2 child IOs.
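 * (4 blocks at offset 14 cross the boundary at block 16: child 0 covers
 * blocks 14-15 and child 1 covers blocks 16-17, each with two 512-byte
 * segments)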
2557 */
2558
2559 /* The 1st child IO is split into multiple segment entries by max_segment_size */
2560 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2561 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2562 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2563 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2564
2565 /* The 2nd child IO is split into multiple segment entries by max_segment_size */
2566 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
2567 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
2568 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
2569 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2570
2571 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
2572 CU_ASSERT(rc == 0);
2573 CU_ASSERT(g_io_done == false);
2574
2575 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2576 stub_complete_io(2);
2577 CU_ASSERT(g_io_done == true);
2578 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2579
2580 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */
2581 bdev->max_segment_size = 15 * 512;
2582 bdev->max_num_segments = 1;
2583 g_io_done = false;
2584
2585 /* An IO crossing the IO boundary requires a split.
2586 * The 1st child IO's segment size exceeds max_segment_size,
2587 * so it is split into multiple segment entries,
2588 * and then into 2 child IOs because of max_num_segments.
2589 * Total 3 child IOs.
2590 */
2591
2592 /* The first 2 child IOs are within one IO boundary.
2593 * Because optimal_io_boundary > max_segment_size * max_num_segments,
2594 * the boundary's worth of data is split into these first 2 IOs.
2595 */
2596 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
2597 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
2598 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2599
2600 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2601 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
2602 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2603
2604 /* The 3rd child IO is created by the IO boundary */
2605 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2606 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2607 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2608
2609 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2610 CU_ASSERT(rc == 0);
2611 CU_ASSERT(g_io_done == false);
2612
2613 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2614 stub_complete_io(3);
2615 CU_ASSERT(g_io_done == true);
2616 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2617
2618 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */
2619 bdev->max_segment_size = 17 * 512;
2620 bdev->max_num_segments = 1;
2621 g_io_done = false;
2622
2623 /* An IO crossing the IO boundary requires a split,
2624 * but the child IOs do not split further.
2625 * Total 2 child IOs.
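 * (each child fits in a single 17 * 512-byte segment: first the 16 blocks up
 * to the boundary, then the remaining 2 blocks)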
2626 */
2627
2628 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
2629 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
2630 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2631
2632 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2633 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2634 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2635
2636 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2637 CU_ASSERT(rc == 0);
2638 CU_ASSERT(g_io_done == false);
2639
2640 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2641 stub_complete_io(2);
2642 CU_ASSERT(g_io_done == true);
2643 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2644
2645 /* Now set up a more complex, multi-vector command that needs to be split,
2646 * including splitting iovecs.
2647 * optimal_io_boundary < max_segment_size * max_num_segments
2648 */
2649 bdev->max_segment_size = 3 * 512;
2650 bdev->max_num_segments = 6;
2651 g_io_done = false;
2652
2653 iov[0].iov_base = (void *)0x10000;
2654 iov[0].iov_len = 4 * 512;
2655 iov[1].iov_base = (void *)0x20000;
2656 iov[1].iov_len = 4 * 512;
2657 iov[2].iov_base = (void *)0x30000;
2658 iov[2].iov_len = 10 * 512;
2659
2660 /* An IO crossing the IO boundary requires a split.
2661 * The 1st child IO's segment size exceeds max_segment_size, and after
2662 * splitting by segment size, the number of segments exceeds max_num_segments,
2663 * so the first boundary's data is split into 2 child IOs.
2664 * Total 3 child IOs.
2665 */
2666
2667 /* The first 2 child IOs are within one IO boundary:
2668 * after splitting by segment size, the segment count exceeds max_num_segments,
2669 * so the boundary's data splits into 2 child IOs.
2670 */
2671 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
2672 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
2673 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
2674 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
2675 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
2676 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
2677 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
2678 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2679
2680 /* The 2nd child IO carries the remaining segment entry within the first boundary */
2681 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2682 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
2683 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2684
2685 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2686 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
2687 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2688
2689 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
2690 CU_ASSERT(rc == 0);
2691 CU_ASSERT(g_io_done == false);
2692
2693 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2694 stub_complete_io(3);
2695 CU_ASSERT(g_io_done == true);
2696 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2697
2698 /* A very complicated case: each sg entry exceeds max_segment_size
2699 * and the IO also splits on the IO boundary.
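 * Each 2048-byte entry becomes a 1536-byte segment plus a 512-byte segment.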
2700 * optimal_io_boundary < max_segment_size * max_num_segments
2701 */
2702 bdev->max_segment_size = 3 * 512;
2703 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2704 g_io_done = false;
2705
2706 for (i = 0; i < 20; i++) {
2707 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2708 iov[i].iov_len = 512 * 4;
2709 }
2710
2711 /* An IO crossing the IO boundary requires a split.
2712 * The 80-block length splits into 5 child IOs based on the offset and the IO boundary.
2713 * Each iov entry needs to be split into 2 entries because of max_segment_size.
2714 * Total 5 child IOs.
2715 */
2716
2717 /* 4 iov entries fall within one IO boundary and each iov entry splits into 2,
2718 * so each child IO occupies 8 child iov entries.
2719 */
2720 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
2721 for (i = 0; i < 4; i++) {
2722 int iovcnt = i * 2;
2723 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2724 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2725 }
2726 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2727
2728 /* The 2nd child IO, 16 child iov entries of the parent IO consumed in total */
2729 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
2730 for (i = 4; i < 8; i++) {
2731 int iovcnt = (i - 4) * 2;
2732 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2733 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2734 }
2735 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2736
2737 /* The 3rd child IO, 24 child iov entries of the parent IO consumed in total */
2738 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
2739 for (i = 8; i < 12; i++) {
2740 int iovcnt = (i - 8) * 2;
2741 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2742 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2743 }
2744 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2745
2746 /* The 4th child IO, all 32 child iov entries of the parent IO consumed */
2747 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
2748 for (i = 12; i < 16; i++) {
2749 int iovcnt = (i - 12) * 2;
2750 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2751 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2752 }
2753 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2754
2755 /* The 5th child IO: the parent's child iov entries are exhausted, so it is
2756 * split in the next round.
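 * (the first four children consume 4 * 8 = 32 child iov entries, the parent's
 * full capacity, which is why only 4 IOs are outstanding in the first round
 * below)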
2757 */ 2758 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2759 for (i = 16; i < 20; i++) { 2760 int iovcnt = (i - 16) * 2; 2761 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2762 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2763 } 2764 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2765 2766 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2767 CU_ASSERT(rc == 0); 2768 CU_ASSERT(g_io_done == false); 2769 2770 /* First split round */ 2771 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2772 stub_complete_io(4); 2773 CU_ASSERT(g_io_done == false); 2774 2775 /* Second split round */ 2776 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2777 stub_complete_io(1); 2778 CU_ASSERT(g_io_done == true); 2779 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2780 2781 spdk_put_io_channel(io_ch); 2782 spdk_bdev_close(desc); 2783 free_bdev(bdev); 2784 ut_fini_bdev(); 2785 } 2786 2787 static void 2788 bdev_io_split_with_io_wait(void) 2789 { 2790 struct spdk_bdev *bdev; 2791 struct spdk_bdev_desc *desc = NULL; 2792 struct spdk_io_channel *io_ch; 2793 struct spdk_bdev_channel *channel; 2794 struct spdk_bdev_mgmt_channel *mgmt_ch; 2795 struct spdk_bdev_opts bdev_opts = {}; 2796 struct iovec iov[3]; 2797 struct ut_expected_io *expected_io; 2798 int rc; 2799 2800 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2801 bdev_opts.bdev_io_pool_size = 2; 2802 bdev_opts.bdev_io_cache_size = 1; 2803 ut_init_bdev(&bdev_opts); 2804 2805 bdev = allocate_bdev("bdev0"); 2806 2807 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 2808 CU_ASSERT(rc == 0); 2809 CU_ASSERT(desc != NULL); 2810 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 2811 io_ch = spdk_bdev_get_io_channel(desc); 2812 CU_ASSERT(io_ch != NULL); 2813 channel = spdk_io_channel_get_ctx(io_ch); 2814 mgmt_ch = channel->shared_resource->mgmt_ch; 2815 2816 bdev->optimal_io_boundary = 16; 2817 bdev->split_on_optimal_io_boundary = true; 2818 2819 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 2820 CU_ASSERT(rc == 0); 2821 2822 /* Now test that a single-vector command is split correctly. 2823 * Offset 14, length 8, payload 0xF000 2824 * Child - Offset 14, length 2, payload 0xF000 2825 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 2826 * 2827 * Set up the expected values before calling spdk_bdev_read_blocks 2828 */ 2829 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 2830 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 2831 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2832 2833 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 2834 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 2835 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2836 2837 /* The following children will be submitted sequentially due to the capacity of 2838 * spdk_bdev_io. 
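 * (bdev_io_pool_size is 2 and bdev_io_cache_size is 1, so with the parent
 * holding one spdk_bdev_io, only a single child can be allocated at a time)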
2839 */ 2840 2841 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 2842 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 2843 CU_ASSERT(rc == 0); 2844 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2845 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2846 2847 /* Completing the first read I/O will submit the first child */ 2848 stub_complete_io(1); 2849 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 2850 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2851 2852 /* Completing the first child will submit the second child */ 2853 stub_complete_io(1); 2854 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2855 2856 /* Complete the second child I/O. This should result in our callback getting 2857 * invoked since the parent I/O is now complete. 2858 */ 2859 stub_complete_io(1); 2860 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2861 2862 /* Now set up a more complex, multi-vector command that needs to be split, 2863 * including splitting iovecs. 2864 */ 2865 iov[0].iov_base = (void *)0x10000; 2866 iov[0].iov_len = 512; 2867 iov[1].iov_base = (void *)0x20000; 2868 iov[1].iov_len = 20 * 512; 2869 iov[2].iov_base = (void *)0x30000; 2870 iov[2].iov_len = 11 * 512; 2871 2872 g_io_done = false; 2873 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 2874 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 2875 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 2876 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2877 2878 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 2879 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 2880 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2881 2882 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 2883 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 2884 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 2885 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2886 2887 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 2888 CU_ASSERT(rc == 0); 2889 CU_ASSERT(g_io_done == false); 2890 2891 /* The following children will be submitted sequentially due to the capacity of 2892 * spdk_bdev_io. 2893 */ 2894 2895 /* Completing the first child will submit the second child */ 2896 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2897 stub_complete_io(1); 2898 CU_ASSERT(g_io_done == false); 2899 2900 /* Completing the second child will submit the third child */ 2901 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2902 stub_complete_io(1); 2903 CU_ASSERT(g_io_done == false); 2904 2905 /* Completing the third child will result in our callback getting invoked 2906 * since the parent I/O is now complete. 
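 * (as with the read above, each child had to wait for the previous child's
 * spdk_bdev_io to be returned to the small pool before it could be submitted)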
2907 */ 2908 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2909 stub_complete_io(1); 2910 CU_ASSERT(g_io_done == true); 2911 2912 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 2913 2914 spdk_put_io_channel(io_ch); 2915 spdk_bdev_close(desc); 2916 free_bdev(bdev); 2917 ut_fini_bdev(); 2918 } 2919 2920 static void 2921 bdev_io_write_unit_split_test(void) 2922 { 2923 struct spdk_bdev *bdev; 2924 struct spdk_bdev_desc *desc = NULL; 2925 struct spdk_io_channel *io_ch; 2926 struct spdk_bdev_opts bdev_opts = {}; 2927 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 2928 struct ut_expected_io *expected_io; 2929 uint64_t i; 2930 int rc; 2931 2932 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 2933 bdev_opts.bdev_io_pool_size = 512; 2934 bdev_opts.bdev_io_cache_size = 64; 2935 ut_init_bdev(&bdev_opts); 2936 2937 bdev = allocate_bdev("bdev0"); 2938 2939 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 2940 CU_ASSERT(rc == 0); 2941 SPDK_CU_ASSERT_FATAL(desc != NULL); 2942 io_ch = spdk_bdev_get_io_channel(desc); 2943 CU_ASSERT(io_ch != NULL); 2944 2945 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 2946 bdev->write_unit_size = 32; 2947 bdev->split_on_write_unit = true; 2948 g_io_done = false; 2949 2950 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 2951 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 2952 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2953 2954 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 2955 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 2956 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2957 2958 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 2959 CU_ASSERT(rc == 0); 2960 CU_ASSERT(g_io_done == false); 2961 2962 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2963 stub_complete_io(2); 2964 CU_ASSERT(g_io_done == true); 2965 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2966 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2967 2968 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 2969 * based on write_unit_size, not optimal_io_boundary */ 2970 bdev->split_on_optimal_io_boundary = true; 2971 bdev->optimal_io_boundary = 16; 2972 g_io_done = false; 2973 2974 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 2975 CU_ASSERT(rc == 0); 2976 CU_ASSERT(g_io_done == false); 2977 2978 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2979 stub_complete_io(2); 2980 CU_ASSERT(g_io_done == true); 2981 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2982 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 2983 2984 /* Write I/O should fail if it is smaller than write_unit_size */ 2985 g_io_done = false; 2986 2987 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 2988 CU_ASSERT(rc == 0); 2989 CU_ASSERT(g_io_done == false); 2990 2991 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2992 poll_threads(); 2993 CU_ASSERT(g_io_done == true); 2994 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2995 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 2996 2997 /* Same for I/O not aligned to write_unit_size */ 2998 g_io_done = false; 2999 3000 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3001 CU_ASSERT(rc == 0); 3002 CU_ASSERT(g_io_done == false); 
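/* As above, the misaligned write never reaches the backend; it fails through
 * deferred completion, hence the poll_threads() below.
 */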
3003 3004 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3005 poll_threads(); 3006 CU_ASSERT(g_io_done == true); 3007 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3008 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3009 3010 /* Write should fail if it needs to be split but there are not enough iovs to submit 3011 * an entire write unit */ 3012 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3013 g_io_done = false; 3014 3015 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3016 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3017 iov[i].iov_len = 512; 3018 } 3019 3020 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3021 io_done, NULL); 3022 CU_ASSERT(rc == 0); 3023 CU_ASSERT(g_io_done == false); 3024 3025 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3026 poll_threads(); 3027 CU_ASSERT(g_io_done == true); 3028 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3029 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3030 3031 spdk_put_io_channel(io_ch); 3032 spdk_bdev_close(desc); 3033 free_bdev(bdev); 3034 ut_fini_bdev(); 3035 } 3036 3037 static void 3038 bdev_io_alignment(void) 3039 { 3040 struct spdk_bdev *bdev; 3041 struct spdk_bdev_desc *desc = NULL; 3042 struct spdk_io_channel *io_ch; 3043 struct spdk_bdev_opts bdev_opts = {}; 3044 int rc; 3045 void *buf = NULL; 3046 struct iovec iovs[2]; 3047 int iovcnt; 3048 uint64_t alignment; 3049 3050 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3051 bdev_opts.bdev_io_pool_size = 20; 3052 bdev_opts.bdev_io_cache_size = 2; 3053 ut_init_bdev(&bdev_opts); 3054 3055 fn_table.submit_request = stub_submit_request_get_buf; 3056 bdev = allocate_bdev("bdev0"); 3057 3058 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3059 CU_ASSERT(rc == 0); 3060 CU_ASSERT(desc != NULL); 3061 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3062 io_ch = spdk_bdev_get_io_channel(desc); 3063 CU_ASSERT(io_ch != NULL); 3064 3065 /* Create aligned buffer */ 3066 rc = posix_memalign(&buf, 4096, 8192); 3067 SPDK_CU_ASSERT_FATAL(rc == 0); 3068 3069 /* Pass aligned single buffer with no alignment required */ 3070 alignment = 1; 3071 bdev->required_alignment = spdk_u32log2(alignment); 3072 3073 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3074 CU_ASSERT(rc == 0); 3075 stub_complete_io(1); 3076 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3077 alignment)); 3078 3079 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3080 CU_ASSERT(rc == 0); 3081 stub_complete_io(1); 3082 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3083 alignment)); 3084 3085 /* Pass unaligned single buffer with no alignment required */ 3086 alignment = 1; 3087 bdev->required_alignment = spdk_u32log2(alignment); 3088 3089 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3090 CU_ASSERT(rc == 0); 3091 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3092 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3093 stub_complete_io(1); 3094 3095 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3096 CU_ASSERT(rc == 0); 3097 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3098 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3099 stub_complete_io(1); 3100 3101 /* Pass unaligned single buffer with 512 alignment required */ 3102 alignment = 512; 3103 bdev->required_alignment = spdk_u32log2(alignment); 3104 3105 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 
	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

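	/* For multi-element iovs the alignment requirement applies to every
	 * element; a single misaligned element forces the whole payload into one
	 * contiguous bounce buffer, which is why orig_iovcnt below equals the
	 * full iovcnt rather than 1.
	 */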
	/* Pass unaligned iov with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	/* Pass iov without allocated buffer with 1024 alignment required */
	alignment = 1024;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 0;

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	free(buf);
}

static void
bdev_io_alignment_with_boundary(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.opts_size = sizeof(bdev_opts);
	ut_init_bdev(&bdev_opts);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 131072);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

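	/* Each case below combines required_alignment with an optimal I/O
	 * boundary, so a single parent I/O is split on the boundary and, where
	 * the payload is unaligned or not allocated at all (iov_base == NULL),
	 * the bdev layer also has to provide a buffer for each child I/O.
	 */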
	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 3;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = NULL;
	iovs[0].iov_len = 512 * 16;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 128;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512 * 160;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
	bdev->optimal_io_boundary = 2;
	bdev->split_on_optimal_io_boundary = true;

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 512;
	iovs[1].iov_base = buf + 16 + 512 + 32;
	iovs[1].iov_len = 1024;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

	/* 512 * 64 with 32 IO boundary */
	bdev->optimal_io_boundary = 32;
	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384;
	iovs[1].iov_base = buf + 16 + 16384 + 32;
	iovs[1].iov_len = 16384;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);

	/* 512 * 160 with 32 IO boundary */
	iovcnt = 1;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 16384 + 65536;

	rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	free(buf);
}

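/* Helpers for the histogram tests below: they record the status and histogram
 * pointer handed to the completion callbacks, and histogram_io_count()
 * accumulates the per-bucket counts into g_count during iteration.
 */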
static void
histogram_status_cb(void *cb_arg, int status)
{
	g_status = status;
}

static void
histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	g_status = status;
	g_histogram = histogram;
}

static void
histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
		   uint64_t total, uint64_t so_far)
{
	g_count += count;
}

static void
histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
{
	spdk_histogram_data_fn cb_fn = cb_arg;

	g_status = status;

	if (status == 0) {
		spdk_histogram_data_iterate(histogram, cb_fn, NULL);
	}
}

static void
bdev_histograms(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ch;
	struct spdk_histogram_data *histogram;
	uint8_t buf[4096];
	int rc;

	ut_init_bdev(NULL);

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(ch != NULL);

	/* Enable histogram */
	g_status = -1;
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);

	/* Allocate histogram */
	histogram = spdk_histogram_data_alloc();
	SPDK_CU_ASSERT_FATAL(histogram != NULL);

	/* Check if histogram is zeroed */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);

	CU_ASSERT(g_count == 0);

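	/* Submit one write and one read, each with a 10us artificial delay, so
	 * both land in a non-zero latency bucket; the aggregated histogram
	 * should then report exactly two I/Os.
	 */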
	rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(10);
	stub_complete_io(1);
	poll_threads();

	/* Check if histogram gathered data from all I/O channels */
	g_histogram = NULL;
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == true);
	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);

	g_count = 0;
	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
	CU_ASSERT(g_count == 2);

	g_count = 0;
	spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count);
	CU_ASSERT(g_status == 0);
	CU_ASSERT(g_count == 2);

	/* Disable histogram */
	spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false);
	poll_threads();
	CU_ASSERT(g_status == 0);
	CU_ASSERT(bdev->internal.histogram_enabled == false);

	/* Try to run histogram commands on disabled bdev */
	spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL);
	poll_threads();
	CU_ASSERT(g_status == -EFAULT);

	spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL);
	CU_ASSERT(g_status == -EFAULT);

	spdk_histogram_data_free(histogram);
	spdk_put_io_channel(ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
_bdev_compare(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);

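	/* In the emulated case the backend only ever sees a READ: the stub
	 * copies g_compare_read_buf into the request and the generic bdev code
	 * performs the comparison; with native COMPARE support the stub itself
	 * does the memcmp and flags a miscompare.
	 */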
	/* 1. successful compare */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = bb_buf;
	g_compare_read_buf_len = sizeof(bb_buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}

static void
_bdev_compare_with_md(bool emulated)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */];
	char buf_miscompare[1024 /* 2 * blocklen */];
	char md_buf[16];
	char md_buf_miscompare[16];
	struct iovec compare_iov;
	uint8_t expected_io_type;
	int rc;

	if (emulated) {
		expected_io_type = SPDK_BDEV_IO_TYPE_READ;
	} else {
		expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE;
	}

	memset(buf, 0xaa, sizeof(buf));
	memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare));
	/* make last md different */
	memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8);
	memset(buf_miscompare, 0xbb, sizeof(buf_miscompare));
	memset(md_buf, 0xaa, 16);
	memset(md_buf_miscompare, 0xbb, 16);

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 2;

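	/* Interleaved layout: two 512-byte data blocks, each followed by 8 bytes
	 * of metadata (blocklen 520, 1040 bytes total). buf_interleaved_miscompare
	 * differs from buf only in the final 8 md bytes, so only the metadata
	 * comparison of the second block can fail.
	 */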
	/* interleaved md & data */
	bdev->md_interleave = true;
	bdev->md_len = 8;
	bdev->blocklen = 512 + 8;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = sizeof(buf);

	/* 1. successful compare with md interleaved */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = sizeof(buf);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 2. miscompare with md interleaved */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_interleaved_miscompare;
	g_compare_read_buf_len = sizeof(buf_interleaved_miscompare);
	rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	/* Separate data & md buffers */
	bdev->md_interleave = false;
	bdev->blocklen = 512;
	compare_iov.iov_base = buf;
	compare_iov.iov_len = 1024;

	/* 3. successful compare with md separated */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* 4. miscompare with md separated where md buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf;
	g_compare_read_buf_len = 1024;
	g_compare_md_buf = md_buf_miscompare;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

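	/* Cases 4 and 5 differ only in which buffer mismatches: a metadata-only
	 * mismatch and a data-only mismatch must each independently yield
	 * SPDK_BDEV_IO_STATUS_MISCOMPARE.
	 */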
	/* 5. miscompare with md separated where buf is different */
	expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = buf_miscompare;
	g_compare_read_buf_len = sizeof(buf_miscompare);
	g_compare_md_buf = md_buf;
	rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf,
					       offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);

	bdev->md_len = 0;
	g_compare_md_buf = NULL;

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
}

static void
bdev_compare(void)
{
	_bdev_compare(false);
	_bdev_compare_with_md(false);
}

static void
bdev_compare_emulated(void)
{
	_bdev_compare(true);
	_bdev_compare_with_md(true);
}

static void
bdev_compare_and_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	char bb_buf[512];
	char cc_buf[512];
	char write_buf[512];
	struct iovec compare_iov;
	struct iovec write_iov;
	int rc;

	memset(aa_buf, 0xaa, sizeof(aa_buf));
	memset(bb_buf, 0xbb, sizeof(bb_buf));
	memset(cc_buf, 0xcc, sizeof(cc_buf));

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false;

	ut_init_bdev(NULL);
	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	compare_iov.iov_base = aa_buf;
	compare_iov.iov_len = sizeof(aa_buf);
	write_iov.iov_base = bb_buf;
	write_iov.iov_len = sizeof(bb_buf);

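	/* With COMPARE unsupported, compare-and-write is emulated: the bdev
	 * layer locks the LBA range, issues a READ for the comparison, then the
	 * WRITE, and finally unlocks the range. The poll_threads() calls below
	 * drive the asynchronous lock/unlock steps.
	 */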
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = aa_buf;
	g_compare_read_buf_len = sizeof(aa_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == false);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0);

	/* Test miscompare */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	g_io_done = false;
	g_compare_read_buf = cc_buf;
	g_compare_read_buf_len = sizeof(cc_buf);
	memset(write_buf, 0, sizeof(write_buf));
	g_compare_write_buf = write_buf;
	g_compare_write_buf_len = sizeof(write_buf);
	rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1,
			offset, num_blocks, io_done, NULL);
	/* Trigger range locking */
	poll_threads();
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	/* Trigger range unlocking earlier because we expect an error here */
	poll_threads();
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 0);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	fn_table.submit_request = stub_submit_request;
	ut_fini_bdev();

	g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true;

	g_compare_read_buf = NULL;
	g_compare_write_buf = NULL;
}

static void
bdev_write_zeroes(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_io_blocks, num_blocks;
	uint32_t num_completed, num_requests;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports write_zeroes, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 4096;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if write zeroes is not supported it'll be replaced by regular writes */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen;
	num_requests = 2;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests;

	for (offset = 0; offset < num_requests; ++offset) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset * num_io_blocks, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

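	/* With interleaved metadata the zeroed child I/Os must cover whole
	 * extended blocks (data + md), so each child carries
	 * ZERO_BUFFER_SIZE / blocklen blocks and the expected-I/O loop below
	 * mirrors that per-child calculation.
	 */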
	/* Check that the splitting is correct if bdev has interleaved metadata */
	bdev->md_interleave = true;
	bdev->md_len = 64;
	bdev->blocklen = 4096 + 64;
	num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, 0);

	/* Check the same for a separate metadata buffer */
	bdev->md_interleave = false;
	bdev->md_len = 64;
	bdev->blocklen = 4096;

	num_requests = offset = 0;
	while (offset < num_blocks) {
		num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks);
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE,
						   offset, num_io_blocks, 0);
		expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen;
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += num_io_blocks;
		num_requests++;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(num_requests);
	CU_ASSERT_EQUAL(num_completed, num_requests);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_zcopy_write(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = false;
	const bool commit = true;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

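	/* Poison the read-side globals so the end of the test can verify that
	 * the zcopy write path never touched them.
	 */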
	g_zcopy_read_buf = (void *) 0x1122334455667788UL;
	g_zcopy_read_buf_len = (uint32_t) -1;
	/* Do a zcopy start for a write (populate=false) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_write_buf = aa_buf;
	g_zcopy_write_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_write_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);
	/* Now do the zcopy end for a write (commit=true) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the zcopy write globals were reset by io_done */
	CU_ASSERT(g_zcopy_write_buf == NULL);
	CU_ASSERT(g_zcopy_write_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy read buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_zcopy_read(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t offset, num_blocks;
	uint32_t num_completed;
	char aa_buf[512];
	struct iovec iov;
	int rc;
	const bool populate = true;
	const bool commit = false;

	memset(aa_buf, 0xaa, sizeof(aa_buf));

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	offset = 50;
	num_blocks = 1;
	iov.iov_base = NULL;
	iov.iov_len = 0;

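	/* Mirror of the write test: poison the write-side globals and verify
	 * afterwards that the zcopy read path left them untouched.
	 */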
	g_zcopy_write_buf = (void *) 0x1122334455667788UL;
	g_zcopy_write_buf_len = (uint32_t) -1;

	/* Do a zcopy start for a read (populate=true) */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	g_io_done = false;
	g_zcopy_read_buf = aa_buf;
	g_zcopy_read_buf_len = sizeof(aa_buf);
	g_zcopy_bdev_io = NULL;
	rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the iov has been set up */
	CU_ASSERT(iov.iov_base == g_zcopy_read_buf);
	CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len);
	/* Check that the bdev_io has been saved */
	CU_ASSERT(g_zcopy_bdev_io != NULL);

	/* Now do the zcopy end for a read (commit=false) */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Check that the zcopy read globals were reset by io_done */
	CU_ASSERT(g_zcopy_read_buf == NULL);
	CU_ASSERT(g_zcopy_read_buf_len == 0);
	/* Check that io_done has freed the g_zcopy_bdev_io */
	CU_ASSERT(g_zcopy_bdev_io == NULL);

	/* Check the zcopy write buffer has not been touched which
	 * ensures that the correct buffers were used.
	 */
	CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL);
	CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bdev_open_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc[2] = {};
	int rc;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0]));

	spdk_bdev_unregister(bdev, NULL, NULL);
	/* Bdev unregister is handled asynchronously. Poll thread to complete. */
	poll_threads();

	rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -ENODEV);
	SPDK_CU_ASSERT_FATAL(desc[1] == NULL);

	spdk_bdev_close(desc[0]);
	free_bdev(bdev);
}

static void
bdev_close_while_hotremove(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Simulate hot-unplug by unregistering bdev */
	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);
	/* Close device while remove event is in flight */
	spdk_bdev_close(desc);

	/* Ensure that unregister callback is delayed */
	CU_ASSERT_EQUAL(g_unregister_arg, NULL);
	CU_ASSERT_EQUAL(g_unregister_rc, -1);

	poll_threads();

	/* Event callback shall not be issued because device was closed */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678);
	CU_ASSERT_EQUAL(g_unregister_rc, 0);

	free_bdev(bdev);
}

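/* In the tests below each descriptor pointer doubles as the event-callback
 * context, so the bdev_open_cbN callbacks (defined earlier in this file) can
 * close the matching descriptor when they receive SPDK_BDEV_EVENT_REMOVE.
 */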
static void
bdev_open_ext(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);

	free_bdev(bdev);
	poll_threads();
}

static void
bdev_open_ext_unregister(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc1 = NULL;
	struct spdk_bdev_desc *desc2 = NULL;
	struct spdk_bdev_desc *desc3 = NULL;
	struct spdk_bdev_desc *desc4 = NULL;
	int rc = 0;

	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1);
	CU_ASSERT_EQUAL(rc, -EINVAL);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3);
	CU_ASSERT_EQUAL(rc, 0);

	rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4);
	CU_ASSERT_EQUAL(rc, 0);

	g_event_type1 = 0xFF;
	g_event_type2 = 0xFF;
	g_event_type3 = 0xFF;
	g_event_type4 = 0xFF;

	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	/* Simulate hot-unplug by unregistering bdev */
	spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678);

	/*
	 * Unregister is handled asynchronously and the event callbacks
	 * (i.e., the bdev_open_cbN above) will be called.
	 * bdev_open_cb3 and bdev_open_cb4 intentionally do not close
	 * desc3 and desc4, so the bdev is not closed.
	 */
	poll_threads();

	/* Check if correct events have been triggered in event callback fn */
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE);
	CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE);

	/* Check that unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc3. As desc4 is still open, the
	 * unregister callback is still delayed.
	 */
	spdk_bdev_close(desc3);
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	/*
	 * Explicitly close desc4 to trigger the pending bdev unregister
	 * operation once the last descriptor is closed.
	 */
	spdk_bdev_close(desc4);

	/* Poll the thread for the async unregister operation */
	poll_threads();

	/* Check that unregister callback is executed */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
	poll_threads();
}

struct timeout_io_cb_arg {
	struct iovec iov;
	uint8_t type;
};

static int
bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
{
	struct spdk_bdev_io *bdev_io;
	int n = 0;

	if (!ch) {
		return -1;
	}

	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
		n++;
	}

	return n;
}

static void
bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
{
	struct timeout_io_cb_arg *ctx = cb_arg;

	ctx->type = bdev_io->type;
	ctx->iov.iov_base = bdev_io->iov.iov_base;
	ctx->iov.iov_len = bdev_io->iov.iov_len;
}

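/* bdev_set_io_timeout() below drives time with spdk_delay_us() against the
 * descriptor's timeout poller: any I/O that has been outstanding longer than
 * timeout_in_sec when the poller fires is reported through
 * bdev_channel_io_timeout_cb() above.
 */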
static void
bdev_set_io_timeout(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* Part 1.
	 * Check the bdev_ch->io_submitted list to make sure that it links
	 * the I/Os, and only the user-submitted ones.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Split IO */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);
	/* We count all submitted I/Os, including those generated by splitting. */
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Also include the reset IO */
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	poll_threads();
	stub_complete_io(1);
	poll_threads();
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Part 2.
	 * Test the registration of the desc timeout poller.
	 */

	/* Successfully set the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 30);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Change the timeout limit */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(desc->io_timeout_poller != NULL);
	CU_ASSERT(desc->timeout_in_sec == 20);
	CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb);
	CU_ASSERT(desc->cb_arg == &cb_arg);

	/* Disable the timeout */
	CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0);
	CU_ASSERT(desc->io_timeout_poller == NULL);

	/* Part 3.
	 * Catch a timed-out I/O and check whether it is the submitted one.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0);
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0);

	/* Don't reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* 15 + 15 = 30: reach the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
	CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen);
	stub_complete_io(1);

	/* Use the same split IO above and check the IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0);

	/* The first child completes in time */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	stub_complete_io(1);
	CU_ASSERT(cb_arg.type == 0);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
	CU_ASSERT(cb_arg.iov.iov_len == 0);

	/* The second child reaches the limit */
	spdk_delay_us(15 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
	CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen);
	stub_complete_io(1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	spdk_delay_us(30 * spdk_get_ticks_hz());
	poll_threads();
	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET);
	stub_complete_io(1);
	poll_threads();

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

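/* Queue-depth sampling: spdk_bdev_set_qd_sampling_period() registers a poller
 * that periodically samples the number of outstanding I/Os. A new period only
 * takes effect once the current sampling round finishes, which is what the
 * new_period vs. period asserts below verify.
 */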
static void
bdev_set_qd_sampling(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch = NULL;
	struct spdk_bdev_channel *bdev_ch = NULL;
	struct timeout_io_cb_arg cb_arg;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	/* Part 1.
	 * Check the bdev_ch->io_submitted list to make sure that it links
	 * the I/Os, and only the user-submitted ones.
	 */
	CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);
	stub_complete_io(1);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0);

	/* Part 2.
	 * Test the registration of the bdev's qd poller.
	 */
	/* 1st: successfully set the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 10);
	CU_ASSERT(bdev->internal.new_period == 10);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);

	/* 2nd: change the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 20);
	CU_ASSERT(bdev->internal.new_period == 20);
	CU_ASSERT(bdev->internal.period == 10);
	CU_ASSERT(bdev->internal.qd_desc != NULL);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller != NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 3rd: change the qd sampling period and verify qd_poll_in_progress */
	spdk_delay_us(20);
	poll_thread_times(0, 1);
	CU_ASSERT(bdev->internal.qd_poll_in_progress == true);
	spdk_bdev_set_qd_sampling_period(bdev, 30);
	CU_ASSERT(bdev->internal.new_period == 30);
	CU_ASSERT(bdev->internal.period == 20);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poll_in_progress == false);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);

	/* 4th: disable the qd sampling period */
	spdk_bdev_set_qd_sampling_period(bdev, 0);
	CU_ASSERT(bdev->internal.new_period == 0);
	CU_ASSERT(bdev->internal.period == 30);
	poll_threads();
	CU_ASSERT(bdev->internal.qd_poller == NULL);
	CU_ASSERT(bdev->internal.period == bdev->internal.new_period);
	CU_ASSERT(bdev->internal.qd_desc == NULL);

	/* Part 3.
	 * Test that submitted I/O and reset work properly with qd sampling.
	 */
	memset(&cb_arg, 0, sizeof(cb_arg));
	spdk_bdev_set_qd_sampling_period(bdev, 1);
	poll_threads();

	CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0);
	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1);

	/* Also include the reset IO */
	memset(&cb_arg, 0, sizeof(cb_arg));
	CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0);
	poll_threads();

	/* Close the desc */
	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);

	/* Complete the submitted IO and reset */
	stub_complete_io(2);
	poll_threads();

	free_bdev(bdev);
	ut_fini_bdev();
}

static void
lba_range_overlap(void)
{
	struct lba_range r1, r2;

	r1.offset = 100;
	r1.length = 50;

	r2.offset = 0;
	r2.length = 1;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 0;
	r2.length = 110;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 100;
	r2.length = 10;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 20;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 140;
	r2.length = 150;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 130;
	r2.length = 200;
	CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 150;
	r2.length = 100;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));

	r2.offset = 110;
	r2.length = 0;
	CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2));
}

static bool g_lock_lba_range_done;
static bool g_unlock_lba_range_done;

static void
lock_lba_range_done(void *ctx, int status)
{
	g_lock_lba_range_done = true;
}

static void
unlock_lba_range_done(void *ctx, int status)
{
	g_unlock_lba_range_done = true;
}

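/* bdev_lock_lba_range()/bdev_unlock_lba_range() are internal bdev.c functions
 * (this file compiles bdev/bdev.c directly), which also lets the tests inspect
 * channel->locked_ranges and bdev->internal.pending_locked_ranges.
 */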
static void
lock_lba_range_check_ranges(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	int ctx1;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);

	/* Unlocks must exactly match a lock. */
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(g_unlock_lba_range_done == false);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
lock_lba_range_with_io_outstanding(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct lba_range *range;
	char buf[4096];
	int ctx1;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);

	g_io_done = false;
	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1);
	CU_ASSERT(rc == 0);

	g_lock_lba_range_done = false;
	rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	/* The lock should immediately become valid, since there are no outstanding
	 * write I/O.
	 */
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_lock_lba_range_done == true);
	range = TAILQ_FIRST(&channel->locked_ranges);
	SPDK_CU_ASSERT_FATAL(range != NULL);
	CU_ASSERT(range->offset == 20);
	CU_ASSERT(range->length == 10);
	CU_ASSERT(range->owner_ch == channel);
	CU_ASSERT(range->locked_ctx == &ctx1);

	rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	spdk_delay_us(100);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges));

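	/* Reads may proceed while an LBA range lock is being taken; only
	 * outstanding writes gate the lock's transition to the valid state, as
	 * the write case below demonstrates.
	 */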
4793 */ 4794 stub_complete_io(1); 4795 spdk_delay_us(100); 4796 poll_threads(); 4797 CU_ASSERT(g_io_done == true); 4798 CU_ASSERT(g_lock_lba_range_done == true); 4799 4800 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4801 CU_ASSERT(rc == 0); 4802 poll_threads(); 4803 4804 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4805 4806 spdk_put_io_channel(io_ch); 4807 spdk_bdev_close(desc); 4808 free_bdev(bdev); 4809 ut_fini_bdev(); 4810 } 4811 4812 static void 4813 lock_lba_range_overlapped(void) 4814 { 4815 struct spdk_bdev *bdev; 4816 struct spdk_bdev_desc *desc = NULL; 4817 struct spdk_io_channel *io_ch; 4818 struct spdk_bdev_channel *channel; 4819 struct lba_range *range; 4820 int ctx1; 4821 int rc; 4822 4823 ut_init_bdev(NULL); 4824 bdev = allocate_bdev("bdev0"); 4825 4826 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4827 CU_ASSERT(rc == 0); 4828 CU_ASSERT(desc != NULL); 4829 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4830 io_ch = spdk_bdev_get_io_channel(desc); 4831 CU_ASSERT(io_ch != NULL); 4832 channel = spdk_io_channel_get_ctx(io_ch); 4833 4834 /* Lock range 20-29. */ 4835 g_lock_lba_range_done = false; 4836 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4837 CU_ASSERT(rc == 0); 4838 poll_threads(); 4839 4840 CU_ASSERT(g_lock_lba_range_done == true); 4841 range = TAILQ_FIRST(&channel->locked_ranges); 4842 SPDK_CU_ASSERT_FATAL(range != NULL); 4843 CU_ASSERT(range->offset == 20); 4844 CU_ASSERT(range->length == 10); 4845 4846 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4847 * 20-29. 4848 */ 4849 g_lock_lba_range_done = false; 4850 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4851 CU_ASSERT(rc == 0); 4852 poll_threads(); 4853 4854 CU_ASSERT(g_lock_lba_range_done == false); 4855 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4856 SPDK_CU_ASSERT_FATAL(range != NULL); 4857 CU_ASSERT(range->offset == 25); 4858 CU_ASSERT(range->length == 15); 4859 4860 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4861 * no longer overlaps with an active lock. 4862 */ 4863 g_unlock_lba_range_done = false; 4864 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4865 CU_ASSERT(rc == 0); 4866 poll_threads(); 4867 4868 CU_ASSERT(g_unlock_lba_range_done == true); 4869 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4870 range = TAILQ_FIRST(&channel->locked_ranges); 4871 SPDK_CU_ASSERT_FATAL(range != NULL); 4872 CU_ASSERT(range->offset == 25); 4873 CU_ASSERT(range->length == 15); 4874 4875 /* Lock 40-59. This should immediately lock since it does not overlap with the 4876 * currently active 25-39 lock. 4877 */ 4878 g_lock_lba_range_done = false; 4879 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4880 CU_ASSERT(rc == 0); 4881 poll_threads(); 4882 4883 CU_ASSERT(g_lock_lba_range_done == true); 4884 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4885 SPDK_CU_ASSERT_FATAL(range != NULL); 4886 range = TAILQ_NEXT(range, tailq); 4887 SPDK_CU_ASSERT_FATAL(range != NULL); 4888 CU_ASSERT(range->offset == 40); 4889 CU_ASSERT(range->length == 20); 4890 4891 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. 
*/ 4892 g_lock_lba_range_done = false; 4893 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4894 CU_ASSERT(rc == 0); 4895 poll_threads(); 4896 4897 CU_ASSERT(g_lock_lba_range_done == false); 4898 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4899 SPDK_CU_ASSERT_FATAL(range != NULL); 4900 CU_ASSERT(range->offset == 35); 4901 CU_ASSERT(range->length == 10); 4902 4903 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4904 * the 40-59 lock is still active. 4905 */ 4906 g_unlock_lba_range_done = false; 4907 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4908 CU_ASSERT(rc == 0); 4909 poll_threads(); 4910 4911 CU_ASSERT(g_unlock_lba_range_done == true); 4912 CU_ASSERT(g_lock_lba_range_done == false); 4913 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4914 SPDK_CU_ASSERT_FATAL(range != NULL); 4915 CU_ASSERT(range->offset == 35); 4916 CU_ASSERT(range->length == 10); 4917 4918 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4919 * no longer any active overlapping locks. 4920 */ 4921 g_unlock_lba_range_done = false; 4922 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4923 CU_ASSERT(rc == 0); 4924 poll_threads(); 4925 4926 CU_ASSERT(g_unlock_lba_range_done == true); 4927 CU_ASSERT(g_lock_lba_range_done == true); 4928 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4929 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4930 SPDK_CU_ASSERT_FATAL(range != NULL); 4931 CU_ASSERT(range->offset == 35); 4932 CU_ASSERT(range->length == 10); 4933 4934 /* Finally, unlock 35-44. */ 4935 g_unlock_lba_range_done = false; 4936 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 4937 CU_ASSERT(rc == 0); 4938 poll_threads(); 4939 4940 CU_ASSERT(g_unlock_lba_range_done == true); 4941 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 4942 4943 spdk_put_io_channel(io_ch); 4944 spdk_bdev_close(desc); 4945 free_bdev(bdev); 4946 ut_fini_bdev(); 4947 } 4948 4949 static void 4950 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 4951 { 4952 g_abort_done = true; 4953 g_abort_status = bdev_io->internal.status; 4954 spdk_bdev_free_io(bdev_io); 4955 } 4956 4957 static void 4958 bdev_io_abort(void) 4959 { 4960 struct spdk_bdev *bdev; 4961 struct spdk_bdev_desc *desc = NULL; 4962 struct spdk_io_channel *io_ch; 4963 struct spdk_bdev_channel *channel; 4964 struct spdk_bdev_mgmt_channel *mgmt_ch; 4965 struct spdk_bdev_opts bdev_opts = {}; 4966 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 4967 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 4968 int rc; 4969 4970 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4971 bdev_opts.bdev_io_pool_size = 7; 4972 bdev_opts.bdev_io_cache_size = 2; 4973 ut_init_bdev(&bdev_opts); 4974 4975 bdev = allocate_bdev("bdev0"); 4976 4977 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4978 CU_ASSERT(rc == 0); 4979 CU_ASSERT(desc != NULL); 4980 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4981 io_ch = spdk_bdev_get_io_channel(desc); 4982 CU_ASSERT(io_ch != NULL); 4983 channel = spdk_io_channel_get_ctx(io_ch); 4984 mgmt_ch = channel->shared_resource->mgmt_ch; 4985 4986 g_abort_done = false; 4987 4988 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 4989 4990 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4991 CU_ASSERT(rc == -ENOTSUP); 4992 4993 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 4994 4995 rc = 
spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 4996 CU_ASSERT(rc == 0); 4997 CU_ASSERT(g_abort_done == true); 4998 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 4999 5000 /* Test the case that the target I/O was successfully aborted. */ 5001 g_io_done = false; 5002 5003 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5004 CU_ASSERT(rc == 0); 5005 CU_ASSERT(g_io_done == false); 5006 5007 g_abort_done = false; 5008 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5009 5010 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5011 CU_ASSERT(rc == 0); 5012 CU_ASSERT(g_io_done == true); 5013 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5014 stub_complete_io(1); 5015 CU_ASSERT(g_abort_done == true); 5016 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5017 5018 /* Test the case that the target I/O was not aborted because it completed 5019 * while the abort was still being processed. 5020 */ 5021 g_io_done = false; 5022 5023 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5024 CU_ASSERT(rc == 0); 5025 CU_ASSERT(g_io_done == false); 5026 5027 g_abort_done = false; 5028 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5029 5030 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5031 CU_ASSERT(rc == 0); 5032 CU_ASSERT(g_io_done == false); 5033 5034 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5035 stub_complete_io(1); 5036 CU_ASSERT(g_io_done == true); 5037 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5038 5039 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5040 stub_complete_io(1); 5041 CU_ASSERT(g_abort_done == true); 5042 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5043 5044 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5045 5046 bdev->optimal_io_boundary = 16; 5047 bdev->split_on_optimal_io_boundary = true; 5048 5049 /* Test that a single-vector command which is split is aborted correctly. 5050 * Offset 14, length 8, payload 0xF000 5051 * Child - Offset 14, length 2, payload 0xF000 5052 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 * (with a boundary of 16, the first child carries 16 - 14 = 2 blocks up to the boundary and the remaining 6 blocks form the second child) 5053 */ 5054 g_io_done = false; 5055 5056 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5057 CU_ASSERT(rc == 0); 5058 CU_ASSERT(g_io_done == false); 5059 5060 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5061 5062 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5063 5064 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5065 CU_ASSERT(rc == 0); 5066 CU_ASSERT(g_io_done == true); 5067 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5068 stub_complete_io(2); 5069 CU_ASSERT(g_abort_done == true); 5070 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5071 5072 /* Test that a multi-vector command that is split on the optimal I/O boundary, 5073 * and whose children are submitted one at a time (each child consumes the parent's 5074 * entire child iovec table), is aborted correctly. The abort is requested before 5075 * the second child I/O is submitted. The parent I/O should complete with failure without submitting the second child I/O. 5076 */
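/*
 * Worked expectation for the case above, assuming each of the
 * 2 * SPDK_BDEV_IO_NUM_CHILD_IOV iovecs describes one 512-byte block:
 * with optimal_io_boundary == SPDK_BDEV_IO_NUM_CHILD_IOV the parent splits
 * at the boundary into two children of SPDK_BDEV_IO_NUM_CHILD_IOV blocks
 * each, and only the first child is outstanding
 * (outstanding_io_count == 1) when the abort is requested.
 */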
5077 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5078 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5079 iov[i].iov_len = 512; 5080 } 5081 5082 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 5083 g_io_done = false; 5084 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 5085 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5086 CU_ASSERT(rc == 0); 5087 CU_ASSERT(g_io_done == false); 5088 5089 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5090 5091 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5092 5093 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5094 CU_ASSERT(rc == 0); 5095 CU_ASSERT(g_io_done == true); 5096 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5097 stub_complete_io(1); 5098 CU_ASSERT(g_abort_done == true); 5099 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5100 5101 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5102 5103 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5104 5105 bdev->optimal_io_boundary = 16; 5106 g_io_done = false; 5107 5108 /* Test that a single-vector command which is split is aborted correctly. 5109 * Unlike the case above, the child abort requests are submitted sequentially 5110 * due to the limited capacity of the spdk_bdev_io pool (bdev_io_pool_size is only 7). The read (offset 14, length 50, boundary 16) splits into four children of 2, 16, 16 and 16 blocks. 5111 */ 5112 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5113 CU_ASSERT(rc == 0); 5114 CU_ASSERT(g_io_done == false); 5115 5116 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5117 5118 g_abort_done = false; 5119 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5120 5121 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5122 CU_ASSERT(rc == 0); 5123 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5124 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5125 5126 stub_complete_io(1); 5127 CU_ASSERT(g_io_done == true); 5128 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5129 stub_complete_io(3); 5130 CU_ASSERT(g_abort_done == true); 5131 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5132 5133 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5134 5135 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5136 5137 spdk_put_io_channel(io_ch); 5138 spdk_bdev_close(desc); 5139 free_bdev(bdev); 5140 ut_fini_bdev(); 5141 } 5142 5143 static void 5144 bdev_unmap(void) 5145 { 5146 struct spdk_bdev *bdev; 5147 struct spdk_bdev_desc *desc = NULL; 5148 struct spdk_io_channel *ioch; 5149 struct spdk_bdev_channel *bdev_ch; 5150 struct ut_expected_io *expected_io; 5151 struct spdk_bdev_opts bdev_opts = {}; 5152 uint32_t i, num_outstanding; 5153 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5154 int rc; 5155 5156 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5157 bdev_opts.bdev_io_pool_size = 512; 5158 bdev_opts.bdev_io_cache_size = 64; 5159 ut_init_bdev(&bdev_opts); 5160 5161 bdev = allocate_bdev("bdev"); 5162 5163 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5164 CU_ASSERT_EQUAL(rc, 0); 5165 SPDK_CU_ASSERT_FATAL(desc != NULL); 5166 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5167 ioch = spdk_bdev_get_io_channel(desc); 5168 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5169 bdev_ch = spdk_io_channel_get_ctx(ioch); 5170 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5171 5172 fn_table.submit_request = stub_submit_request; 5173 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5174 5175 /* Case 1: First, test that the request won't be split */ 5176 num_blocks = 32; 5177 5178 g_io_done = false; 5179 expected_io =
ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5180 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5181 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5182 CU_ASSERT_EQUAL(rc, 0); 5183 CU_ASSERT(g_io_done == false); 5184 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5185 stub_complete_io(1); 5186 CU_ASSERT(g_io_done == true); 5187 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5188 5189 /* Case 2: Test the split into two child requests */ 5190 bdev->max_unmap = 8; 5191 bdev->max_unmap_segments = 2; 5192 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5193 num_blocks = max_unmap_blocks * 2; 5194 offset = 0; 5195 5196 g_io_done = false; 5197 for (i = 0; i < 2; i++) { 5198 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5199 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5200 offset += max_unmap_blocks; 5201 } 5202 5203 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5204 CU_ASSERT_EQUAL(rc, 0); 5205 CU_ASSERT(g_io_done == false); 5206 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5207 stub_complete_io(2); 5208 CU_ASSERT(g_io_done == true); 5209 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5210 5211 /* Case 3: Test the split into 15 child requests; at most 8 may be outstanding at once, so the first batch of 8 completes before the remaining 7 are submitted */ 5212 num_children = 15; 5213 num_blocks = max_unmap_blocks * num_children; 5214 g_io_done = false; 5215 offset = 0; 5216 for (i = 0; i < num_children; i++) { 5217 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5218 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5219 offset += max_unmap_blocks; 5220 } 5221 5222 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5223 CU_ASSERT_EQUAL(rc, 0); 5224 CU_ASSERT(g_io_done == false); 5225 5226 while (num_children > 0) { 5227 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5228 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5229 stub_complete_io(num_outstanding); 5230 num_children -= num_outstanding; 5231 } 5232 CU_ASSERT(g_io_done == true); 5233 5234 spdk_put_io_channel(ioch); 5235 spdk_bdev_close(desc); 5236 free_bdev(bdev); 5237 ut_fini_bdev(); 5238 } 5239 5240 static void 5241 bdev_write_zeroes_split_test(void) 5242 { 5243 struct spdk_bdev *bdev; 5244 struct spdk_bdev_desc *desc = NULL; 5245 struct spdk_io_channel *ioch; 5246 struct spdk_bdev_channel *bdev_ch; 5247 struct ut_expected_io *expected_io; 5248 struct spdk_bdev_opts bdev_opts = {}; 5249 uint32_t i, num_outstanding; 5250 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5251 int rc; 5252 5253 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5254 bdev_opts.bdev_io_pool_size = 512; 5255 bdev_opts.bdev_io_cache_size = 64; 5256 ut_init_bdev(&bdev_opts); 5257 5258 bdev = allocate_bdev("bdev"); 5259 5260 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5261 CU_ASSERT_EQUAL(rc, 0); 5262 SPDK_CU_ASSERT_FATAL(desc != NULL); 5263 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5264 ioch = spdk_bdev_get_io_channel(desc); 5265 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5266 bdev_ch = spdk_io_channel_get_ctx(ioch); 5267 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5268 5269 fn_table.submit_request = stub_submit_request; 5270 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5271 5272 /* Case 1: First, test that the request won't be split */
5273 num_blocks = 32; 5274 5275 g_io_done = false; 5276 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5277 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5278 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5279 CU_ASSERT_EQUAL(rc, 0); 5280 CU_ASSERT(g_io_done == false); 5281 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5282 stub_complete_io(1); 5283 CU_ASSERT(g_io_done == true); 5284 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5285 5286 /* Case 2: Test the split into two child requests */ 5287 max_write_zeroes_blocks = 8; 5288 bdev->max_write_zeroes = max_write_zeroes_blocks; 5289 num_blocks = max_write_zeroes_blocks * 2; 5290 offset = 0; 5291 5292 g_io_done = false; 5293 for (i = 0; i < 2; i++) { 5294 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5295 0); 5296 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5297 offset += max_write_zeroes_blocks; 5298 } 5299 5300 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5301 CU_ASSERT_EQUAL(rc, 0); 5302 CU_ASSERT(g_io_done == false); 5303 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5304 stub_complete_io(2); 5305 CU_ASSERT(g_io_done == true); 5306 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5307 5308 /* Case 3: Test the split into 15 child requests; at most 8 may be outstanding at once, so the first batch of 8 completes before the remaining 7 are submitted */ 5309 num_children = 15; 5310 num_blocks = max_write_zeroes_blocks * num_children; 5311 g_io_done = false; 5312 offset = 0; 5313 for (i = 0; i < num_children; i++) { 5314 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5315 0); 5316 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5317 offset += max_write_zeroes_blocks; 5318 } 5319 5320 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5321 CU_ASSERT_EQUAL(rc, 0); 5322 CU_ASSERT(g_io_done == false); 5323 5324 while (num_children > 0) { 5325 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5326 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5327 stub_complete_io(num_outstanding); 5328 num_children -= num_outstanding; 5329 } 5330 CU_ASSERT(g_io_done == true); 5331 5332 spdk_put_io_channel(ioch); 5333 spdk_bdev_close(desc); 5334 free_bdev(bdev); 5335 ut_fini_bdev(); 5336 } 5337 5338 static void 5339 bdev_set_options_test(void) 5340 { 5341 struct spdk_bdev_opts bdev_opts = {}; 5342 int rc; 5343 5344 /* Case 1: opts_size is not set */ 5345 rc = spdk_bdev_set_opts(&bdev_opts); 5346 CU_ASSERT(rc == -1); 5347 5348 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5349 bdev_opts.bdev_io_pool_size = 4; 5350 bdev_opts.bdev_io_cache_size = 2; 5351 bdev_opts.small_buf_pool_size = 4; 5352 5353 /* Case 2: small_buf_pool_size set below the minimum */ 5354 rc = spdk_bdev_set_opts(&bdev_opts); 5355 CU_ASSERT(rc == -1); 5356 5357 /* Case 3: large_buf_pool_size set below the minimum */ 5358 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE; 5359 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1; 5360 rc = spdk_bdev_set_opts(&bdev_opts); 5361 CU_ASSERT(rc == -1); 5362 5363 /* Case 4: Set a valid large_buf_pool_size */ 5364 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE; 5365 rc = spdk_bdev_set_opts(&bdev_opts); 5366 CU_ASSERT(rc == 0); 5367 5368 /* Case 5: Set different valid values for the small and large buffer pools */
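/*
 * A rough sketch of the validation the cases above and below assume
 * (inferred from the assertions, not copied from bdev.c): opts_size must
 * be set, and neither buffer pool may shrink below its compiled-in
 * default:
 *
 *   if (opts->opts_size == 0 ||
 *       opts->small_buf_pool_size < BUF_SMALL_POOL_SIZE ||
 *       opts->large_buf_pool_size < BUF_LARGE_POOL_SIZE) {
 *           return -1;
 *   }
 */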
5369 bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3; 5370 bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3; 5371 rc = spdk_bdev_set_opts(&bdev_opts); 5372 CU_ASSERT(rc == 0); 5373 } 5374 5375 static uint64_t 5376 get_ns_time(void) 5377 { 5378 int rc; 5379 struct timespec ts; 5380 5381 rc = clock_gettime(CLOCK_MONOTONIC, &ts); 5382 CU_ASSERT(rc == 0); 5383 return (uint64_t)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec; 5384 } 5385 5386 static int 5387 rb_tree_get_height(struct spdk_bdev_name *bdev_name) 5388 { 5389 int h1, h2; 5390 5391 if (bdev_name == NULL) { 5392 return -1; 5393 } else { 5394 h1 = rb_tree_get_height(RB_LEFT(bdev_name, node)); 5395 h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node)); 5396 5397 return spdk_max(h1, h2) + 1; 5398 } 5399 } 5400 5401 static void 5402 bdev_multi_allocation(void) 5403 { 5404 const int max_bdev_num = 1024 * 16; 5405 char name[max_bdev_num][16]; 5406 char noexist_name[] = "invalid_bdev"; 5407 struct spdk_bdev *bdev[max_bdev_num]; 5408 int i, j; 5409 uint64_t last_time; 5410 int bdev_num; 5411 int height; 5412 5413 for (j = 0; j < max_bdev_num; j++) { 5414 snprintf(name[j], sizeof(name[j]), "bdev%d", j); 5415 } 5416 5417 for (i = 0; i < 16; i++) { 5418 last_time = get_ns_time(); 5419 bdev_num = 1024 * (i + 1); 5420 for (j = 0; j < bdev_num; j++) { 5421 bdev[j] = allocate_bdev(name[j]); 5422 height = rb_tree_get_height(&bdev[j]->internal.bdev_name); /* the name tree should stay balanced, so its height may grow only logarithmically with the number of bdevs */ 5423 CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2))); 5424 } 5425 SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num, 5426 (get_ns_time() - last_time) / 1000 / 1000); 5427 for (j = 0; j < bdev_num; j++) { 5428 CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL); 5429 } 5430 CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL); 5431 5432 for (j = 0; j < bdev_num; j++) { 5433 free_bdev(bdev[j]); 5434 } 5435 for (j = 0; j < bdev_num; j++) { 5436 CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL); 5437 } 5438 } 5439 } 5440 5441 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5442 5443 static int 5444 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5445 int array_size) 5446 { 5447 if (array_size > 0 && domains) { 5448 domains[0] = g_bdev_memory_domain; 5449 } 5450 5451 return 1; 5452 } 5453 5454 static void 5455 bdev_get_memory_domains(void) 5456 { 5457 struct spdk_bdev_fn_table fn_table = { 5458 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5459 }; 5460 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5461 struct spdk_memory_domain *domains[2] = {}; 5462 int rc; 5463 5464 /* bdev is NULL */ 5465 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5466 CU_ASSERT(rc == -EINVAL); 5467 5468 /* domains is NULL */ 5469 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5470 CU_ASSERT(rc == 1); 5471 5472 /* array size is 0 */ 5473 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5474 CU_ASSERT(rc == 1); 5475 5476 /* the get_memory_domains op is set */ 5477 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5478 CU_ASSERT(rc == 1); 5479 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5480 5481 /* the get_memory_domains op is not set */ 5482 fn_table.get_memory_domains = NULL; 5483 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5484 CU_ASSERT(rc == 0); 5485 } 5486 5487 static void 5488 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5489 { 5490 struct spdk_bdev *bdev; 5491 struct spdk_bdev_desc *desc = NULL; 5492 struct
spdk_io_channel *io_ch; 5493 char io_buf[512]; 5494 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5495 struct ut_expected_io *expected_io; 5496 int rc; 5497 5498 ut_init_bdev(NULL); 5499 5500 bdev = allocate_bdev("bdev0"); 5501 bdev->md_interleave = false; 5502 bdev->md_len = 8; 5503 5504 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5505 CU_ASSERT(rc == 0); 5506 SPDK_CU_ASSERT_FATAL(desc != NULL); 5507 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5508 io_ch = spdk_bdev_get_io_channel(desc); 5509 CU_ASSERT(io_ch != NULL); 5510 5511 /* read */ 5512 g_io_done = false; 5513 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5514 if (ext_io_opts) { 5515 expected_io->md_buf = ext_io_opts->metadata; 5516 expected_io->ext_io_opts = ext_io_opts; 5517 } 5518 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5519 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5520 5521 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5522 5523 CU_ASSERT(rc == 0); 5524 CU_ASSERT(g_io_done == false); 5525 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5526 stub_complete_io(1); 5527 CU_ASSERT(g_io_done == true); 5528 5529 /* write */ 5530 g_io_done = false; 5531 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5532 if (ext_io_opts) { 5533 expected_io->md_buf = ext_io_opts->metadata; 5534 expected_io->ext_io_opts = ext_io_opts; 5535 } 5536 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5537 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5538 5539 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5540 5541 CU_ASSERT(rc == 0); 5542 CU_ASSERT(g_io_done == false); 5543 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5544 stub_complete_io(1); 5545 CU_ASSERT(g_io_done == true); 5546 5547 spdk_put_io_channel(io_ch); 5548 spdk_bdev_close(desc); 5549 free_bdev(bdev); 5550 ut_fini_bdev(); 5551 5552 } 5553 5554 static void 5555 bdev_io_ext(void) 5556 { 5557 struct spdk_bdev_ext_io_opts ext_io_opts = { 5558 .metadata = (void *)0xFF000000, 5559 .size = sizeof(ext_io_opts) 5560 }; 5561 5562 _bdev_io_ext(&ext_io_opts); 5563 } 5564 5565 static void 5566 bdev_io_ext_no_opts(void) 5567 { 5568 _bdev_io_ext(NULL); 5569 } 5570 5571 static void 5572 bdev_io_ext_invalid_opts(void) 5573 { 5574 struct spdk_bdev *bdev; 5575 struct spdk_bdev_desc *desc = NULL; 5576 struct spdk_io_channel *io_ch; 5577 char io_buf[512]; 5578 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5579 struct spdk_bdev_ext_io_opts ext_io_opts = { 5580 .metadata = (void *)0xFF000000, 5581 .size = sizeof(ext_io_opts) 5582 }; 5583 int rc; 5584 5585 ut_init_bdev(NULL); 5586 5587 bdev = allocate_bdev("bdev0"); 5588 bdev->md_interleave = false; 5589 bdev->md_len = 8; 5590 5591 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5592 CU_ASSERT(rc == 0); 5593 SPDK_CU_ASSERT_FATAL(desc != NULL); 5594 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5595 io_ch = spdk_bdev_get_io_channel(desc); 5596 CU_ASSERT(io_ch != NULL); 5597 5598 /* Test invalid ext_opts size */ 5599 ext_io_opts.size = 0; 5600 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5601 CU_ASSERT(rc == -EINVAL); 5602 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5603 CU_ASSERT(rc == -EINVAL); 5604 5605 ext_io_opts.size = 
sizeof(ext_io_opts) * 2; 5606 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5607 CU_ASSERT(rc == -EINVAL); 5608 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5609 CU_ASSERT(rc == -EINVAL); 5610 5611 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 5612 sizeof(ext_io_opts.metadata) - 1; 5613 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5614 CU_ASSERT(rc == -EINVAL); 5615 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5616 CU_ASSERT(rc == -EINVAL); 5617 5618 spdk_put_io_channel(io_ch); 5619 spdk_bdev_close(desc); 5620 free_bdev(bdev); 5621 ut_fini_bdev(); 5622 } 5623 5624 static void 5625 bdev_io_ext_split(void) 5626 { 5627 struct spdk_bdev *bdev; 5628 struct spdk_bdev_desc *desc = NULL; 5629 struct spdk_io_channel *io_ch; 5630 char io_buf[512]; 5631 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5632 struct ut_expected_io *expected_io; 5633 struct spdk_bdev_ext_io_opts ext_io_opts = { 5634 .metadata = (void *)0xFF000000, 5635 .size = sizeof(ext_io_opts) 5636 }; 5637 int rc; 5638 5639 ut_init_bdev(NULL); 5640 5641 bdev = allocate_bdev("bdev0"); 5642 bdev->md_interleave = false; 5643 bdev->md_len = 8; 5644 5645 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5646 CU_ASSERT(rc == 0); 5647 SPDK_CU_ASSERT_FATAL(desc != NULL); 5648 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5649 io_ch = spdk_bdev_get_io_channel(desc); 5650 CU_ASSERT(io_ch != NULL); 5651 5652 /* Check that an I/O request with ext_opts and metadata is split correctly. 5653 * Offset 14, length 8, payload 0xF000 5654 * Child - Offset 14, length 2, payload 0xF000 5655 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5656 */ 5657 bdev->optimal_io_boundary = 16; 5658 bdev->split_on_optimal_io_boundary = true; 5659 bdev->md_interleave = false; 5660 bdev->md_len = 8; 5661 5662 iov.iov_base = (void *)0xF000; 5663 iov.iov_len = 4096; 5664 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 5665 ext_io_opts.metadata = (void *)0xFF000000; 5666 ext_io_opts.size = sizeof(ext_io_opts); 5667 g_io_done = false; 5668 5669 /* read */ 5670 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 5671 expected_io->md_buf = ext_io_opts.metadata; 5672 expected_io->ext_io_opts = &ext_io_opts; 5673 expected_io->copy_opts = true; 5674 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5675 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5676 5677 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 5678 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; /* the second child's metadata starts 2 blocks * 8-byte md_len into the buffer */ 5679 expected_io->ext_io_opts = &ext_io_opts; 5680 expected_io->copy_opts = true; 5681 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5682 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5683 5684 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5685 CU_ASSERT(rc == 0); 5686 CU_ASSERT(g_io_done == false); 5687 5688 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5689 stub_complete_io(2); 5690 CU_ASSERT(g_io_done == true); 5691 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5692 5693 /* write */ 5694 g_io_done = false; 5695 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 5696 expected_io->md_buf = ext_io_opts.metadata; 5697 expected_io->ext_io_opts = &ext_io_opts;
5698 expected_io->copy_opts = true; 5699 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 5700 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5701 5702 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 5703 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 5704 expected_io->ext_io_opts = &ext_io_opts; 5705 expected_io->copy_opts = true; 5706 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 5707 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5708 5709 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 5710 CU_ASSERT(rc == 0); 5711 CU_ASSERT(g_io_done == false); 5712 5713 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5714 stub_complete_io(2); 5715 CU_ASSERT(g_io_done == true); 5716 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5717 5718 spdk_put_io_channel(io_ch); 5719 spdk_bdev_close(desc); 5720 free_bdev(bdev); 5721 ut_fini_bdev(); 5722 } 5723 5724 static void 5725 bdev_io_ext_bounce_buffer(void) 5726 { 5727 struct spdk_bdev *bdev; 5728 struct spdk_bdev_desc *desc = NULL; 5729 struct spdk_io_channel *io_ch; 5730 char io_buf[512]; 5731 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5732 struct ut_expected_io *expected_io; 5733 struct spdk_bdev_ext_io_opts ext_io_opts = { 5734 .metadata = (void *)0xFF000000, 5735 .size = sizeof(ext_io_opts) 5736 }; 5737 int rc; 5738 5739 ut_init_bdev(NULL); 5740 5741 bdev = allocate_bdev("bdev0"); 5742 bdev->md_interleave = false; 5743 bdev->md_len = 8; 5744 5745 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5746 CU_ASSERT(rc == 0); 5747 SPDK_CU_ASSERT_FATAL(desc != NULL); 5748 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5749 io_ch = spdk_bdev_get_io_channel(desc); 5750 CU_ASSERT(io_ch != NULL); 5751 5752 /* Verify the data pull/push paths. 5753 * The stub bdev doesn't support memory domains, so bounce buffers from the bdev memory pool are used: a write pulls the payload from the caller's memory domain before submission, and a read pushes it back on completion. */ 5754 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 5755 5756 /* read */ 5757 g_io_done = false; 5758 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5759 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5760 expected_io->ext_io_opts = &ext_io_opts; 5761 expected_io->copy_opts = true; 5762 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5763 5764 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5765 5766 CU_ASSERT(rc == 0); 5767 CU_ASSERT(g_io_done == false); 5768 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5769 stub_complete_io(1); 5770 CU_ASSERT(g_memory_domain_push_data_called == true); 5771 CU_ASSERT(g_io_done == true); 5772 5773 /* write */ 5774 g_io_done = false; 5775 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5776 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5777 expected_io->ext_io_opts = &ext_io_opts; 5778 expected_io->copy_opts = true; 5779 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5780 5781 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 5782 5783 CU_ASSERT(rc == 0); 5784 CU_ASSERT(g_memory_domain_pull_data_called == true); 5785 CU_ASSERT(g_io_done == false); 5786 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5787 stub_complete_io(1); 5788 CU_ASSERT(g_io_done == true); 5789 5790 spdk_put_io_channel(io_ch); 5791 spdk_bdev_close(desc); 5792
free_bdev(bdev); 5793 ut_fini_bdev(); 5794 } 5795 5796 static void 5797 bdev_register_uuid_alias(void) 5798 { 5799 struct spdk_bdev *bdev, *second; 5800 char uuid[SPDK_UUID_STRING_LEN]; 5801 int rc; 5802 5803 ut_init_bdev(NULL); 5804 bdev = allocate_bdev("bdev0"); 5805 5806 /* Make sure a UUID was generated */ 5807 CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid))); 5808 5809 /* Check that a UUID alias was registered */ 5810 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5811 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5812 5813 /* Unregister the bdev */ 5814 spdk_bdev_unregister(bdev, NULL, NULL); 5815 poll_threads(); 5816 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5817 5818 /* Check the same, but this time register the bdev with a non-zero UUID */ 5819 rc = spdk_bdev_register(bdev); 5820 CU_ASSERT_EQUAL(rc, 0); 5821 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5822 5823 /* Unregister the bdev */ 5824 spdk_bdev_unregister(bdev, NULL, NULL); 5825 poll_threads(); 5826 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5827 5828 /* Register the bdev using its UUID as the name */ 5829 bdev->name = uuid; 5830 rc = spdk_bdev_register(bdev); 5831 CU_ASSERT_EQUAL(rc, 0); 5832 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5833 5834 /* Unregister the bdev */ 5835 spdk_bdev_unregister(bdev, NULL, NULL); 5836 poll_threads(); 5837 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 5838 5839 /* Check that it's not possible to register two bdevs with the same UUID */ 5840 bdev->name = "bdev0"; 5841 second = allocate_bdev("bdev1"); 5842 spdk_uuid_copy(&bdev->uuid, &second->uuid); 5843 rc = spdk_bdev_register(bdev); 5844 CU_ASSERT_EQUAL(rc, -EEXIST); 5845 5846 /* Regenerate the UUID and re-check */ 5847 spdk_uuid_generate(&bdev->uuid); 5848 rc = spdk_bdev_register(bdev); 5849 CU_ASSERT_EQUAL(rc, 0); 5850 5851 /* And check that both bdevs can be retrieved through their UUIDs */ 5852 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 5853 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 5854 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 5855 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 5856 5857 free_bdev(second); 5858 free_bdev(bdev); 5859 ut_fini_bdev(); 5860 } 5861 5862 static void 5863 bdev_unregister_by_name(void) 5864 { 5865 struct spdk_bdev *bdev; 5866 int rc; 5867 5868 bdev = allocate_bdev("bdev"); 5869 5870 g_event_type1 = 0xFF; 5871 g_unregister_arg = NULL; 5872 g_unregister_rc = -1; 5873 5874 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5875 CU_ASSERT(rc == -ENODEV); 5876 5877 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5878 CU_ASSERT(rc == -ENODEV); 5879 5880 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 5881 CU_ASSERT(rc == 0); 5882 5883 /* Check that the unregister callback is delayed */ 5884 CU_ASSERT(g_unregister_arg == NULL); 5885 CU_ASSERT(g_unregister_rc == -1); 5886 5887 poll_threads(); 5888 5889 /* The event callback shall not be issued because no descriptor was open */ 5890 CU_ASSERT(g_event_type1 == 0xFF); 5891 /* The unregister callback is issued */ 5892 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 5893 CU_ASSERT(g_unregister_rc == 0); 5894 5895 free_bdev(bdev); 5896 } 5897 5898 static int 5899 count_bdevs(void *ctx, struct spdk_bdev *bdev) 5900 { 5901 int *count = ctx; 5902 5903 (*count)++; 5904 5905 return 0; 5906 } 5907 5908 static void 5909 for_each_bdev_test(void) 5910 {
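/*
 * Expected counts, derived from the setup below: eight bdevs are
 * allocated. bdev0 is moved to SPDK_BDEV_STATUS_REMOVING, so
 * spdk_for_each_bdev() should visit the remaining 7. bdev1, bdev3 and
 * bdev5 are claimed by bdev_ut_if, so spdk_for_each_bdev_leaf() should
 * visit only the 4 unclaimed, visible bdevs (bdev2, bdev4, bdev6 and
 * bdev7).
 */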
5911 struct spdk_bdev *bdev[8]; 5912 int rc, count; 5913 5914 bdev[0] = allocate_bdev("bdev0"); 5915 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 5916 5917 bdev[1] = allocate_bdev("bdev1"); 5918 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 5919 CU_ASSERT(rc == 0); 5920 5921 bdev[2] = allocate_bdev("bdev2"); 5922 5923 bdev[3] = allocate_bdev("bdev3"); 5924 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 5925 CU_ASSERT(rc == 0); 5926 5927 bdev[4] = allocate_bdev("bdev4"); 5928 5929 bdev[5] = allocate_bdev("bdev5"); 5930 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 5931 CU_ASSERT(rc == 0); 5932 5933 bdev[6] = allocate_bdev("bdev6"); 5934 5935 bdev[7] = allocate_bdev("bdev7"); 5936 5937 count = 0; 5938 rc = spdk_for_each_bdev(&count, count_bdevs); 5939 CU_ASSERT(rc == 0); 5940 CU_ASSERT(count == 7); 5941 5942 count = 0; 5943 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5944 CU_ASSERT(rc == 0); 5945 CU_ASSERT(count == 4); 5946 5947 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 5948 free_bdev(bdev[0]); 5949 free_bdev(bdev[1]); 5950 free_bdev(bdev[2]); 5951 free_bdev(bdev[3]); 5952 free_bdev(bdev[4]); 5953 free_bdev(bdev[5]); 5954 free_bdev(bdev[6]); 5955 free_bdev(bdev[7]); 5956 } 5957 5958 static void 5959 bdev_seek_test(void) 5960 { 5961 struct spdk_bdev *bdev; 5962 struct spdk_bdev_desc *desc = NULL; 5963 struct spdk_io_channel *io_ch; 5964 int rc; 5965 5966 ut_init_bdev(NULL); 5967 poll_threads(); 5968 5969 bdev = allocate_bdev("bdev0"); 5970 5971 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5972 CU_ASSERT(rc == 0); 5973 poll_threads(); 5974 SPDK_CU_ASSERT_FATAL(desc != NULL); 5975 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5976 io_ch = spdk_bdev_get_io_channel(desc); 5977 CU_ASSERT(io_ch != NULL); 5978 5979 /* Seek data not supported */ 5980 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 5981 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 5982 CU_ASSERT(rc == 0); 5983 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5984 poll_threads(); 5985 CU_ASSERT(g_seek_offset == 0); 5986 5987 /* Seek hole not supported */ 5988 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 5989 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 5990 CU_ASSERT(rc == 0); 5991 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5992 poll_threads(); 5993 CU_ASSERT(g_seek_offset == UINT64_MAX); 5994 5995 /* Seek data supported */ 5996 g_seek_data_offset = 12345; 5997 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 5998 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 5999 CU_ASSERT(rc == 0); 6000 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6001 stub_complete_io(1); 6002 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6003 CU_ASSERT(g_seek_offset == 12345); 6004 6005 /* Seek hole supported */ 6006 g_seek_hole_offset = 67890; 6007 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6008 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6009 CU_ASSERT(rc == 0); 6010 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6011 stub_complete_io(1); 6012 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6013 CU_ASSERT(g_seek_offset == 67890); 6014 6015 spdk_put_io_channel(io_ch); 6016 spdk_bdev_close(desc); 6017 free_bdev(bdev); 6018 ut_fini_bdev(); 6019 } 6020 6021 static void 6022 bdev_copy(void) 6023 { 6024 struct spdk_bdev *bdev; 6025 struct spdk_bdev_desc *desc = NULL; 6026 struct spdk_io_channel *ioch; 6027 
struct ut_expected_io *expected_io; 6028 uint64_t src_offset, num_blocks; 6029 uint32_t num_completed; 6030 int rc; 6031 6032 ut_init_bdev(NULL); 6033 bdev = allocate_bdev("bdev"); 6034 6035 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6036 CU_ASSERT_EQUAL(rc, 0); 6037 SPDK_CU_ASSERT_FATAL(desc != NULL); 6038 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6039 ioch = spdk_bdev_get_io_channel(desc); 6040 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6041 6042 fn_table.submit_request = stub_submit_request; 6043 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6044 6045 /* First, test that if the bdev supports copy, the request won't be split */ 6046 bdev->md_len = 0; 6047 bdev->blocklen = 4096; 6048 num_blocks = 512; 6049 src_offset = bdev->blockcnt - num_blocks; 6050 6051 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6052 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6053 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6054 CU_ASSERT_EQUAL(rc, 0); 6055 num_completed = stub_complete_io(1); 6056 CU_ASSERT_EQUAL(num_completed, 1); 6057 6058 /* Check that the request fails if copy is not supported */ 6059 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false); 6060 6061 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6062 CU_ASSERT_EQUAL(rc, -ENOTSUP); 6063 6064 ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true); 6065 spdk_put_io_channel(ioch); 6066 spdk_bdev_close(desc); 6067 free_bdev(bdev); 6068 ut_fini_bdev(); 6069 } 6070 6071 static void 6072 bdev_copy_split_test(void) 6073 { 6074 struct spdk_bdev *bdev; 6075 struct spdk_bdev_desc *desc = NULL; 6076 struct spdk_io_channel *ioch; 6077 struct spdk_bdev_channel *bdev_ch; 6078 struct ut_expected_io *expected_io; 6079 struct spdk_bdev_opts bdev_opts = {}; 6080 uint32_t i, num_outstanding; 6081 uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children; 6082 int rc; 6083 6084 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 6085 bdev_opts.bdev_io_pool_size = 512; 6086 bdev_opts.bdev_io_cache_size = 64; 6087 rc = spdk_bdev_set_opts(&bdev_opts); 6088 CU_ASSERT(rc == 0); 6089 6090 ut_init_bdev(NULL); 6091 bdev = allocate_bdev("bdev"); 6092 6093 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6094 CU_ASSERT_EQUAL(rc, 0); 6095 SPDK_CU_ASSERT_FATAL(desc != NULL); 6096 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6097 ioch = spdk_bdev_get_io_channel(desc); 6098 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6099 bdev_ch = spdk_io_channel_get_ctx(ioch); 6100 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 6101 6102 fn_table.submit_request = stub_submit_request; 6103 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6104 6105 /* Case 1: First, test that the request won't be split */ 6106 num_blocks = 32; 6107 src_offset = bdev->blockcnt - num_blocks; 6108 6109 g_io_done = false; 6110 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6111 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6112 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6113 CU_ASSERT_EQUAL(rc, 0); 6114 CU_ASSERT(g_io_done == false); 6115 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6116 stub_complete_io(1); 6117 CU_ASSERT(g_io_done == true); 6118 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6119 6120 /* Case 2: Test the split into two child requests */ 6121 max_copy_blocks = 8; 6122 bdev->max_copy =
max_copy_blocks; 6123 num_children = 2; 6124 num_blocks = max_copy_blocks * num_children; 6125 offset = 0; 6126 src_offset = bdev->blockcnt - num_blocks; 6127 6128 g_io_done = false; 6129 for (i = 0; i < num_children; i++) { 6130 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6131 src_offset + offset, max_copy_blocks); 6132 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6133 offset += max_copy_blocks; 6134 } 6135 6136 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6137 CU_ASSERT_EQUAL(rc, 0); 6138 CU_ASSERT(g_io_done == false); 6139 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children); 6140 stub_complete_io(num_children); 6141 CU_ASSERT(g_io_done == true); 6142 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6143 6144 /* Case 3: Test the split into 15 child requests; at most 8 may be outstanding at once, so the first batch of 8 completes before the remaining 7 are submitted */ 6145 num_children = 15; 6146 num_blocks = max_copy_blocks * num_children; 6147 offset = 0; 6148 src_offset = bdev->blockcnt - num_blocks; 6149 6150 g_io_done = false; 6151 for (i = 0; i < num_children; i++) { 6152 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset, 6153 src_offset + offset, max_copy_blocks); 6154 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6155 offset += max_copy_blocks; 6156 } 6157 6158 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6159 CU_ASSERT_EQUAL(rc, 0); 6160 CU_ASSERT(g_io_done == false); 6161 6162 while (num_children > 0) { 6163 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS); 6164 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 6165 stub_complete_io(num_outstanding); 6166 num_children -= num_outstanding; 6167 } 6168 CU_ASSERT(g_io_done == true); 6169 6170 spdk_put_io_channel(ioch); 6171 spdk_bdev_close(desc); 6172 free_bdev(bdev); 6173 ut_fini_bdev(); 6174 } 6175 6176 int 6177 main(int argc, char **argv) 6178 { 6179 CU_pSuite suite = NULL; 6180 unsigned int num_failures; 6181 6182 CU_set_error_action(CUEA_ABORT); 6183 CU_initialize_registry(); 6184 6185 suite = CU_add_suite("bdev", null_init, null_clean); 6186 6187 CU_ADD_TEST(suite, bytes_to_blocks_test); 6188 CU_ADD_TEST(suite, num_blocks_test); 6189 CU_ADD_TEST(suite, io_valid_test); 6190 CU_ADD_TEST(suite, open_write_test); 6191 CU_ADD_TEST(suite, claim_test); 6192 CU_ADD_TEST(suite, alias_add_del_test); 6193 CU_ADD_TEST(suite, get_device_stat_test); 6194 CU_ADD_TEST(suite, bdev_io_types_test); 6195 CU_ADD_TEST(suite, bdev_io_wait_test); 6196 CU_ADD_TEST(suite, bdev_io_spans_split_test); 6197 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 6198 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 6199 CU_ADD_TEST(suite, bdev_io_mix_split_test); 6200 CU_ADD_TEST(suite, bdev_io_split_with_io_wait); 6201 CU_ADD_TEST(suite, bdev_io_write_unit_split_test); 6202 CU_ADD_TEST(suite, bdev_io_alignment_with_boundary); 6203 CU_ADD_TEST(suite, bdev_io_alignment); 6204 CU_ADD_TEST(suite, bdev_histograms); 6205 CU_ADD_TEST(suite, bdev_write_zeroes); 6206 CU_ADD_TEST(suite, bdev_compare_and_write); 6207 CU_ADD_TEST(suite, bdev_compare); 6208 CU_ADD_TEST(suite, bdev_compare_emulated); 6209 CU_ADD_TEST(suite, bdev_zcopy_write); 6210 CU_ADD_TEST(suite, bdev_zcopy_read); 6211 CU_ADD_TEST(suite, bdev_open_while_hotremove); 6212 CU_ADD_TEST(suite, bdev_close_while_hotremove); 6213 CU_ADD_TEST(suite, bdev_open_ext); 6214 CU_ADD_TEST(suite, bdev_open_ext_unregister); 6215 CU_ADD_TEST(suite,
bdev_set_io_timeout); 6216 CU_ADD_TEST(suite, bdev_set_qd_sampling); 6217 CU_ADD_TEST(suite, lba_range_overlap); 6218 CU_ADD_TEST(suite, lock_lba_range_check_ranges); 6219 CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding); 6220 CU_ADD_TEST(suite, lock_lba_range_overlapped); 6221 CU_ADD_TEST(suite, bdev_io_abort); 6222 CU_ADD_TEST(suite, bdev_unmap); 6223 CU_ADD_TEST(suite, bdev_write_zeroes_split_test); 6224 CU_ADD_TEST(suite, bdev_set_options_test); 6225 CU_ADD_TEST(suite, bdev_multi_allocation); 6226 CU_ADD_TEST(suite, bdev_get_memory_domains); 6227 CU_ADD_TEST(suite, bdev_io_ext); 6228 CU_ADD_TEST(suite, bdev_io_ext_no_opts); 6229 CU_ADD_TEST(suite, bdev_io_ext_invalid_opts); 6230 CU_ADD_TEST(suite, bdev_io_ext_split); 6231 CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer); 6232 CU_ADD_TEST(suite, bdev_register_uuid_alias); 6233 CU_ADD_TEST(suite, bdev_unregister_by_name); 6234 CU_ADD_TEST(suite, for_each_bdev_test); 6235 CU_ADD_TEST(suite, bdev_seek_test); 6236 CU_ADD_TEST(suite, bdev_copy); 6237 CU_ADD_TEST(suite, bdev_copy_split_test); 6238 6239 allocate_cores(1); 6240 allocate_threads(1); 6241 set_thread(0); 6242 6243 CU_basic_set_mode(CU_BRM_VERBOSE); 6244 CU_basic_run_tests(); 6245 num_failures = CU_get_number_of_failures(); 6246 CU_cleanup_registry(); 6247 6248 free_threads(); 6249 free_cores(); 6250 6251 return num_failures; 6252 } 6253