/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt,
			     struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt,
			     struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
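
/*
 * Usage sketch (assuming the MOCK_SET()/MOCK_CLEAR() macros that accompany
 * DEFINE_RETURN_MOCK() in SPDK's mock framework): a test can make the mocked
 * pull/push paths report a failure instead of invoking the completion
 * callback, e.g.:
 *
 *	MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM);
 *	... submit an I/O whose payload must be pulled from a memory domain ...
 *	MOCK_CLEAR(spdk_memory_domain_pull_data);
 */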

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
null_init(void)
{
	return 0;
}

static int
null_clean(void)
{
	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t type;
	uint64_t offset;
	uint64_t length;
	int iovcnt;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
	void *md_buf;
	struct spdk_bdev_ext_io_opts *ext_io_opts;
	bool copy_opts;
	TAILQ_ENTRY(ut_expected_io) link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
	uint32_t outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io) expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio_to_abort, &ch->outstanding_io, module_link) {
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio_to_abort, module_link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
		if (bdev_io->u.bdev.ext_opts) {
			CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.ext_opts->metadata);
		}
	}

	if (expected_io->copy_opts) {
		if (expected_io->ext_io_opts) {
			/* opts are not NULL, so they should have been copied */
			CU_ASSERT(expected_io->ext_io_opts != bdev_io->u.bdev.ext_opts);
			CU_ASSERT(bdev_io->u.bdev.ext_opts == &bdev_io->internal.ext_opts_copy);
			/* internal opts always points to the opts passed by the caller */
			CU_ASSERT(expected_io->ext_io_opts == bdev_io->internal.ext_opts);
		} else {
			/* passed opts were NULL, so we expect bdev_io opts to be NULL */
			CU_ASSERT(bdev_io->u.bdev.ext_opts == NULL);
		}
	} else {
		/* opts were not copied, so they should be equal */
		CU_ASSERT(expected_io->ext_io_opts == bdev_io->u.bdev.ext_opts);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = &bdev_io->internal.orig_iovs[i];
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bdev_io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ] = true,
	[SPDK_BDEV_IO_TYPE_WRITE] = true,
	[SPDK_BDEV_IO_TYPE_COMPARE] = true,
	[SPDK_BDEV_IO_TYPE_UNMAP] = true,
	[SPDK_BDEV_IO_TYPE_FLUSH] = true,
	[SPDK_BDEV_IO_TYPE_RESET] = true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO] = true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD] = true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES] = true,
	[SPDK_BDEV_IO_TYPE_ZCOPY] = true,
	[SPDK_BDEV_IO_TYPE_ABORT] = true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE] = true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA] = true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};
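
/*
 * The tests below drive this stub through a common pattern (a sketch using
 * only the helpers and globals defined above):
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *
 *	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
 *	stub_complete_io(1);
 *
 * stub_submit_request() dequeues one ut_expected_io per submitted (child) I/O
 * and asserts that its type, offset, length and iovecs match, while
 * stub_complete_io() completes outstanding I/Os with g_io_exp_status.
 */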

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};
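
/*
 * Note: because bdev_ut_if sets .async_init = true, the bdev layer treats
 * module initialization as pending until the explicit
 * spdk_bdev_module_init_done() call made in bdev_ut_module_init() above.
 */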

static void vbdev_ut_examine(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

static void
vbdev_ut_examine(struct spdk_bdev *bdev)
{
	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) {
		poll_threads();
	}

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of its base
	 * bdevs (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.
	 * This models caching, RAID, partitioning or logical volume use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+    +---+     +   +---+---+
	 *        |       |     \ |     /   / \
	 *      bdev0   bdev1   bdev2      bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it is a leaf
	 * bdev.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if a desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* In case blocklen is not a power of two */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}
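
/*
 * Worked arithmetic for bytes_to_blocks_test() above: with blocklen = 512,
 * offset 512 B / length 1024 B map to block 1 / 2 blocks; with the
 * non-power-of-two blocklen = 100, offset 100 B / length 200 B likewise map
 * to block 1 / 2 blocks, and any byte value not divisible by the block
 * length (e.g. 3) must be rejected.
 */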

static void
num_blocks_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	memset(&bdev, 0, sizeof(bdev));
	bdev.name = "num_blocks";
	bdev.fn_table = &fn_table;
	bdev.module = &bdev_ut_if;
	spdk_bdev_register(&bdev);
	poll_threads();
	spdk_bdev_notify_blockcnt_change(&bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(&bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 100) == 0);

	spdk_bdev_close(desc);
	spdk_bdev_unregister(&bdev, NULL, NULL);

	poll_threads();

	/* Callback is not called for a closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	CU_ASSERT(pthread_mutex_init(&bdev.internal.mutex, NULL) == 0);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1) */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	CU_ASSERT(pthread_mutex_destroy(&bdev.internal.mutex) == 0);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Try adding an alias identical to the name.
	 * Since the alias is identical to the name, it cannot be added to the aliases list.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add an empty alias;
	 * this one should fail.
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Try adding the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try removing an alias from registered bdevs */

	/* Alias is not on a bdev aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on a bdev aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Try to remove the name instead of an alias; this should fail since the
	 * name cannot be changed or removed */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Try to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Try to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);
}

static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry entry;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_desc *desc;
	bool submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
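
/*
 * bdev_io_wait_test() below exercises the ENOMEM path: with
 * bdev_io_pool_size = 4, a fifth spdk_bdev_read_blocks() fails with -ENOMEM,
 * and spdk_bdev_queue_io_wait() registers io_wait_cb() to resubmit once an
 * outstanding bdev_io completes and returns to the pool.
 */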

static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no segment size/count limits set,
	 * so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs, so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary, so this
	 * should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross the boundary and does not exceed max_segment_size or
	 * max_num_segments. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_num_segments */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
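
	/*
	 * Metadata offsets in the expected child I/Os above follow the data split:
	 * with separate metadata and md_len = 8, a child starting M blocks into the
	 * parent I/O expects md_buf + M * 8 (e.g. md_buf + 2 * 8 for the child at
	 * block offset 16, which is 2 blocks past the parent's offset 14).
	 */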

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case, the length of
	 * the remainder of the iovec array within an I/O boundary is a multiple of the
	 * block size.
	 */

	/* Fill the iovec array for exactly one boundary. The iovec count for this boundary
	 * is BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 256;
	}
	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 512;

	/* Add an extra iovec to trigger split */
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 256);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 1);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs. In this case the child request
	 * offset should be rewound to the last aligned offset, and the request should
	 * complete successfully without error.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	g_io_status = 0;
	/* The first expected io should run from offset 0 to BDEV_IO_NUM_CHILD_IOV - 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 1, BDEV_IO_NUM_CHILD_IOV - 1);
	expected_io->md_buf = md_buf;
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		ut_expected_io_set_iov(expected_io, i,
				       (void *)((i + 1) * 0x10000), 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The second expected io should run from offset BDEV_IO_NUM_CHILD_IOV - 1 to BDEV_IO_NUM_CHILD_IOV */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 1,
					   1, 2);
	expected_io->md_buf = md_buf + (BDEV_IO_NUM_CHILD_IOV - 1) * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000), 256);
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	/* The third expected io should run from offset BDEV_IO_NUM_CHILD_IOV to BDEV_IO_NUM_CHILD_IOV + 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 1);
	expected_io->md_buf = md_buf + BDEV_IO_NUM_CHILD_IOV * 8;
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, md_buf,
					    0, BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test a multi vector command that needs to be split due to the IO boundary and
	 * the capacity of child iovs. In particular, test the case where the command is
	 * split due to the capacity of child iovs, the tail address is not aligned with
	 * the block size, and it is rewound to the aligned address.
	 *
	 * The iovecs used in the read request are complex but are based on the data
	 * collected in a real issue. We change the base addresses but keep the lengths,
	 * so as not to lose the credibility of the test.
	 */
	bdev->optimal_io_boundary = 128;
	g_io_done = false;
	g_io_status = 0;

	for (i = 0; i < 31; i++) {
		iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20));
		iov[i].iov_len = 1024;
	}
	iov[31].iov_base = (void *)0xFEED1F00000;
	iov[31].iov_len = 32768;
	iov[32].iov_base = (void *)0xFEED2000000;
	iov[32].iov_len = 160;
	iov[33].iov_base = (void *)0xFEED2100000;
	iov[33].iov_len = 4096;
	iov[34].iov_base = (void *)0xFEED2200000;
	iov[34].iov_len = 4096;
	iov[35].iov_base = (void *)0xFEED2300000;
	iov[35].iov_len = 4096;
	iov[36].iov_base = (void *)0xFEED2400000;
	iov[36].iov_len = 4096;
	iov[37].iov_base = (void *)0xFEED2500000;
	iov[37].iov_len = 4096;
	iov[38].iov_base = (void *)0xFEED2600000;
	iov[38].iov_len = 4096;
	iov[39].iov_base = (void *)0xFEED2700000;
	iov[39].iov_len = 4096;
	iov[40].iov_base = (void *)0xFEED2800000;
	iov[40].iov_len = 4096;
	iov[41].iov_base = (void *)0xFEED2900000;
	iov[41].iov_len = 4096;
	iov[42].iov_base = (void *)0xFEED2A00000;
	iov[42].iov_len = 4096;
	iov[43].iov_base = (void *)0xFEED2B00000;
	iov[43].iov_len = 12288;
	iov[44].iov_base = (void *)0xFEED2C00000;
	iov[44].iov_len = 8192;
	iov[45].iov_base = (void *)0xFEED2F00000;
	iov[45].iov_len = 4096;
	iov[46].iov_base = (void *)0xFEED3000000;
	iov[46].iov_len = 4096;
	iov[47].iov_base = (void *)0xFEED3100000;
	iov[47].iov_len = 4096;
	iov[48].iov_base = (void *)0xFEED3200000;
	iov[48].iov_len = 24576;
	iov[49].iov_base = (void *)0xFEED3300000;
	iov[49].iov_len = 16384;
	iov[50].iov_base = (void *)0xFEED3400000;
	iov[50].iov_len = 12288;
	iov[51].iov_base = (void *)0xFEED3500000;
	iov[51].iov_len = 4096;
	iov[52].iov_base = (void *)0xFEED3600000;
	iov[52].iov_len = 4096;
	iov[53].iov_base = (void *)0xFEED3700000;
	iov[53].iov_len = 4096;
	iov[54].iov_base = (void *)0xFEED3800000;
	iov[54].iov_len = 28672;
	iov[55].iov_base = (void *)0xFEED3900000;
	iov[55].iov_len = 20480;
	iov[56].iov_base = (void *)0xFEED3A00000;
	iov[56].iov_len = 4096;
	iov[57].iov_base = (void *)0xFEED3B00000;
	iov[57].iov_len = 12288;
	iov[58].iov_base = (void *)0xFEED3C00000;
	iov[58].iov_len = 4096;
	iov[59].iov_base = (void *)0xFEED3D00000;
	iov[59].iov_len = 4096;
	iov[60].iov_base = (void *)0xFEED3E00000;
	iov[60].iov_len = 352;

	/* The 1st child IO must be from iov[0] to iov[31], split by the capacity
	 * of child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32);
	expected_io->md_buf = md_buf;
	for (i = 0; i < 32; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
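
	/*
	 * Byte accounting for the 1st child above: 31 iovecs of 1024 bytes plus
	 * the 32768-byte iov[31] total 64512 bytes, i.e. 126 blocks of 512 bytes,
	 * which is why the child covers blocks [0, 126) with 32 iovecs.
	 */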

	/* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33],
	 * split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
	expected_io->md_buf = md_buf + 126 * 8;
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
	 * the first 864 bytes of iov[46], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
	expected_io->md_buf = md_buf + 128 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
			       iov[33].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
	ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
	ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
	ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
	ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
	ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
	ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
	ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
	 * first 864 bytes of iov[52], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
	expected_io->md_buf = md_buf + 256 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
			       iov[46].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
	ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */

	/* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
	 * the first 4960 bytes of iov[57], split by the IO boundary requirement.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
	expected_io->md_buf = md_buf + 384 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
			       iov[52].iov_len - 864);
	ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
	ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
	ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 6th child IO must be from the remaining 7328 bytes of iov[57] and
	 * iov[58] to the first 3936 bytes of iov[59], split by the capacity of
	 * child iovs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
	expected_io->md_buf = md_buf + 512 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
			       iov[57].iov_len - 4960);
	ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
	expected_io->md_buf = md_buf + 542 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
			       iov[59].iov_len - 3936);
	ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
					    0, 543, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(5);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
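
	/* Why the children complete as 1 + 5 + 1: the 1st child alone consumes all
	 * 32 child iov entries, so it is the only IO of the first round. The 2nd
	 * through 6th children then use 2 + 14 + 7 + 6 + 3 = 32 entries and are
	 * submitted together as the second round, leaving the 1-block 7th child
	 * for the last round.
	 */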

	/* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should
	 * not be split, so test that.
	 */
	bdev->optimal_io_boundary = 15;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH. This should also not be split. */
	bdev->optimal_io_boundary = 16;
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	/* Child requests return an error status */
	bdev->optimal_io_boundary = 16;
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 * 64;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test that a multi-vector command terminates with failure and does not
	 * continue the splitting process when one of its child I/Os fails.
	 * The multi-vector command is the same as the one above that needs to be
	 * split by strip and then split further due to the capacity of child iovs.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV].iov_len = 256;

	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
	iov[BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	g_io_done = false;
	g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* For this test we will create the following conditions to hit the code path where
	 * we are trying to send an IO following a split that has no iovs because we had to
	 * trim them for alignment reasons.
	 *
	 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
	 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
	 *   position 30 and overshoot by 0x2e.
	 * - That means we'll send the IO and loop back to pick up the remaining bytes at
	 *   child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e,
	 *   which eliminates that vector, so we just send the first split IO with 30 vectors
	 *   and let the completion pick up the last 2 vectors.
	 */
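
	/* The numbers above work out as follows: 0x4200 bytes is 33 blocks and the
	 * 16K boundary is 0x4000 bytes = 32 blocks. 31 iovs of 0x212 bytes reach
	 * 31 * 0x212 = 0x402e, overshooting the boundary by 0x2e, so child iov
	 * index 30 is trimmed to 0x212 - 0x2e = 0x1e4. The remainder is then
	 * 0x2e + 0x1d2 = 0x200 bytes, i.e. the single block of the 2nd child IO.
	 */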
	bdev->optimal_io_boundary = 32;
	bdev->split_on_optimal_io_boundary = true;
	g_io_done = false;

	/* Init all parent IOVs to 0x212 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 0x212;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
					   BDEV_IO_NUM_CHILD_IOV - 1);
	/* expect 0-29 to be 1:1 with the parent iov */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}

	/* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
	 * where 0x2e is the amount we overshot the 16K boundary
	 */
	ut_expected_io_set_iov(expected_io, BDEV_IO_NUM_CHILD_IOV - 2,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO will have 2 remaining vectors, one to pick up from the one that was
	 * shortened that takes it to the next boundary and then a final one to get us to
	 * 0x4200 bytes for the IO.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
					   1, 2);
	/* position 30 picked up the remaining bytes to the next boundary */
	ut_expected_io_set_iov(expected_io, 0,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);

	/* position 31 picked up the rest of the transfer to get us to 0x4200 */
	ut_expected_io_set_iov(expected_io, 1,
			       (void *)(iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 1, 0,
				    BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
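
/* A minimal sketch (for illustration only, not used by the tests) of the
 * boundary rule the splits above follow when split_on_optimal_io_boundary is
 * set: a child IO never crosses an optimal_io_boundary-aligned line, so at
 * most this many blocks fit into the child that starts at offset_blocks.
 */
static inline uint64_t
ut_blocks_to_boundary(uint64_t offset_blocks, uint32_t optimal_io_boundary)
{
	return optimal_io_boundary - (offset_blocks % optimal_io_boundary);
}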

static void
bdev_io_max_size_and_segment_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	bdev_opts.opts_size = sizeof(bdev_opts);
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->split_on_optimal_io_boundary = false;
	bdev->optimal_io_boundary = 0;

	/* Case 0: max_num_segments == 0 (no limit), but the 2 * 512 byte transfer
	 * exceeds max_segment_size == 512.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 0;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 1: max_segment_size == 0 (no limit), but the 2 iovs exceed
	 * max_num_segments == 1.
	 */
	bdev->max_segment_size = 0;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 8 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test that a non-vector command is split correctly.
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* Child IO 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child IO 1 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
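
	/* With max_segment_size == 512 and max_num_segments == 1, the 2-block read
	 * below must become ceil((2 * 512) / 512) = 2 children of one 512-byte
	 * segment each, which is what the two expectations above describe.
	 */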

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	bdev->max_segment_size = 2 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 6 * 512;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* iov[1] is split into 2 iov entries, which then become separate children */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* iov[2] is split into 3 iov entries, which then become separate children */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
	stub_complete_io(6);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
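
	/* Accounting for the write above: 2 + 4 + 6 = 12 blocks across 3 parent
	 * iovs. With segments capped at 2 * 512 bytes and one segment per child,
	 * iov[0] yields 1 child, iov[1] 2 children and iov[2] 3 children: 6
	 * children of 2 blocks each.
	 */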

	/* Test multi vector command that needs to be split by strip and then needs
	 * to be split further due to the capacity of parent IO child iovs.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	/* Each input iov is split into 2 single-segment children, so half of the
	 * input iovs already consume all child iov entries of one split round.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV / 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The remaining iovs are split in the second round */
	for (i = BDEV_IO_NUM_CHILD_IOV / 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
		ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == BDEV_IO_NUM_CHILD_IOV);
	stub_complete_io(BDEV_IO_NUM_CHILD_IOV);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
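
	/* In total: 32 iovs of 1024 bytes = 64 blocks = 64 single-segment children.
	 * Each child occupies one of the 32 child iov entries, so the split runs
	 * as two rounds of BDEV_IO_NUM_CHILD_IOV (32) children each, as asserted
	 * above.
	 */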

	/* An error case: one of the child IOs produced by the split is not a
	 * multiple of the block size, so the split exits with an error.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512 + 256;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 256;

	/* iov[0] is split into 512 and 256 bytes. The 256-byte piece is less than
	 * a block, and the next round of splitting finds that its first child IO
	 * would be smaller than the block size, so it exits with an error.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First child IO is OK */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* error exit */
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Test multi vector command that needs to be split by strip and then needs
	 * to be split further due to the capacity of child iovs.
	 *
	 * In this case, the last two iovs need to be split, but that would exceed
	 * the capacity of child iovs, so they have to wait until the first batch
	 * completes.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}
	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 2;
	}

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
	/* 0 ~ (BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	/* (BDEV_IO_NUM_CHILD_IOV - 2) is split */
	ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
	ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The entries for iov[31] exceed the parent IO's child iov capacity,
	 * so it is split in the next round.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV, 0,
				    BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Next round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
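
	/* Entry accounting for the first child above: 30 unsplit 512-byte iovs plus
	 * the two halves of iov[30] make exactly 32 = BDEV_IO_NUM_CHILD_IOV entries,
	 * so iov[31] cannot fit and becomes the 2-block child of the second round.
	 */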

	/* This case is similar to the previous one, but the IO composed of the
	 * last few child iov entries does not add up to a full block, so those
	 * entries cannot be put into this IO and must wait for the next one.
	 */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 128;
	}

	/* The first child iovcnt isn't BDEV_IO_NUM_CHILD_IOV but BDEV_IO_NUM_CHILD_IOV - 2,
	 * because the remaining 2 child iov slots could only hold 2 * 128 bytes,
	 * which is less than a full block.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
					   BDEV_IO_NUM_CHILD_IOV - 2, BDEV_IO_NUM_CHILD_IOV - 2);
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The second child IO waits until the first child IO completes, because
	 * the combined iovcnt of the two IOs would exceed the parent IO's child
	 * iov capacity: it spans iovs (BDEV_IO_NUM_CHILD_IOV - 2) to
	 * (BDEV_IO_NUM_CHILD_IOV + 2).
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV - 2,
					   1, 4);
	ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
	ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
	ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
	ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* A very complicated case. Each sg entry exceeds max_segment_size and
	 * needs to be split. At the same time, each child IO must be a multiple
	 * of the blocklen, and the child iovcnt exceeds the parent iovcnt.
	 */
	bdev->max_segment_size = 512 + 128;
	bdev->max_num_segments = 3;
	g_io_done = false;

	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 256;
	}

	for (i = BDEV_IO_NUM_CHILD_IOV - 2; i < BDEV_IO_NUM_CHILD_IOV + 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 + 128;
	}
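
	/* Child iov entry budget for the expectations that follow: each for()
	 * round produces children of 3 + (2 + 1 reserved) + 3 = 9 entries, so 3
	 * rounds use 27. The 10th child adds 3 more and the 11th the final 2:
	 * 27 + 3 + 2 = 32 entries, which is why 11 child IOs are in flight per
	 * split round.
	 */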

	/* Each for() round below generates 3 child IOs using 9 child iov entries
	 * (3 * 9 = 27 in total), consumes 4 parent iov entries and covers 6 blocks.
	 * The 3 rounds generate 9 child IOs.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4;

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* Each child IO must be a multiple of the blocklen, so iov[j + 2] must
		 * be split. If the third entry were added as well, a multiple of the
		 * blocklen could not be guaranteed; the split-off tail still occupies
		 * one entry of the parent's child iov array.
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* Child iov position at 27, the 10th child IO:
	 * the iov entry index is 3 * 4 and the block offset is 3 * 6.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
	ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Child iov position at 30, the 11th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
	ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd split round starts at iovpos 0 with the 12th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* Another 9 child IOs consuming 27 child iov entries: again 4 parent iov
	 * entries and 6 blocks per for() round, with the parent iov index starting
	 * from 16 and the block offset starting from 24.
	 */
	for (i = 0; i < 3; i++) {
		uint32_t j = i * 4 + 16;
		uint32_t offset = i * 6 + 24;

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		/* Each child IO must be a multiple of the blocklen, so iov[j + 2] must
		 * be split; the split-off tail again occupies one entry of the parent's
		 * child iov array.
		 */
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
		ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
		ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
		ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	}

	/* The 22nd child IO, child iov position at 30 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The third round */
	/* Here is the 23rd child IO and the child iovpos is 0 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
	ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 24th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
	ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
	ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 25th child IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
	ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV + 2, 0,
				    50, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The parent IO supports up to 32 child iovs, so it works out that at most
	 * 11 child IOs can be split at a time; splitting continues after the first
	 * batch is over.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The 2nd round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
	stub_complete_io(11);
	CU_ASSERT(g_io_done == false);

	/* The last round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
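
	/* Overall accounting: 30 iovs of 768 bytes plus 4 iovs of 640 bytes =
	 * 25600 bytes = 50 blocks, split into 11 + 11 + 3 = 25 child IOs across
	 * the three rounds.
	 */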

	/* Test a WRITE_ZEROES. This should also not be split. */
	bdev->max_segment_size = 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test an UNMAP. This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* Test a FLUSH. This should also not be split. */
	g_io_done = false;

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
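
/* A small sketch (for illustration only, not used by the tests) of the segment
 * math the cases above rely on: a parent iov of iov_len bytes needs this many
 * child iov entries once each entry is capped at max_segment_size bytes.
 */
static inline uint64_t
ut_entries_per_iov(uint64_t iov_len, uint32_t max_segment_size)
{
	return (iov_len + max_segment_size - 1) / max_segment_size;
}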

static void
bdev_io_mix_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* First case: optimal_io_boundary == max_segment_size * max_num_segments */
	bdev->split_on_optimal_io_boundary = true;
	bdev->optimal_io_boundary = 16;

	bdev->max_segment_size = 512;
	bdev->max_num_segments = 16;
	g_io_done = false;

	/* IO crossing the IO boundary requires split.
	 * Total 2 child IOs.
	 */

	/* The 1st child IO splits the segment_size across multiple segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO splits the segment_size across multiple segment entries */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Second case: optimal_io_boundary > max_segment_size * max_num_segments */
	bdev->max_segment_size = 15 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* IO crossing the IO boundary requires split.
	 * The 1st child IO's segment size exceeds max_segment_size, so it is split
	 * across multiple segment entries, which then become 2 child IOs because
	 * of max_num_segments.
	 * Total 3 child IOs.
	 */

	/* The first 2 child IOs are within one IO boundary. Because
	 * optimal_io_boundary > max_segment_size * max_num_segments, the first
	 * boundary's worth of data is split into 2 child IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 3rd child IO is due to the IO boundary */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
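
	/* Numbers for the second case: the first 16-block boundary window cannot
	 * be a single child because max_segment_size * max_num_segments is only
	 * 15 * 512 bytes, so it becomes 15 + 1 blocks; the remaining 2 blocks of
	 * the 18-block read form the 3rd child in the next window.
	 */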

	/* Third case: optimal_io_boundary < max_segment_size * max_num_segments */
	bdev->max_segment_size = 17 * 512;
	bdev->max_num_segments = 1;
	g_io_done = false;

	/* IO crossing the IO boundary requires split.
	 * The child IOs themselves do not split further.
	 * Total 2 child IOs.
	 */

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = 6;
	g_io_done = false;

	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 4 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 10 * 512;

	/* IO crossing the IO boundary requires split.
	 * The 1st child IO's segment size exceeds max_segment_size and, once the
	 * segments are split, the segment count exceeds max_num_segments, so the
	 * first boundary's worth of data is split into 2 child IOs.
	 * Total 3 child IOs.
	 */

	/* The first 2 child IOs are within one IO boundary: after splitting the
	 * segment sizes, the segment count exceeds max_num_segments, so the
	 * boundary window itself splits into 2 child IOs.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
	ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
	ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
	ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The 2nd child IO has the remaining segment entry */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
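
	/* Worked segment count for the write above: the 16-block boundary window
	 * would need 2 + 2 + 3 = 7 capped entries (iov[2]'s first 8 blocks split
	 * as 3 + 3 + 2 blocks), one more than max_num_segments, so the window
	 * ends up as a 14-block child with 6 entries plus a 2-block child.
	 */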

	/* A very complicated case. Each sg entry exceeds max_segment_size and
	 * splits on the IO boundary.
	 * optimal_io_boundary < max_segment_size * max_num_segments
	 */
	bdev->max_segment_size = 3 * 512;
	bdev->max_num_segments = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;

	for (i = 0; i < 20; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512 * 4;
	}

	/* IO crossing the IO boundary requires split.
	 * The 80-block length splits into 5 child IOs based on the offset and IO
	 * boundary, and each iov entry splits into 2 entries because of
	 * max_segment_size.
	 * Total 5 child IOs.
	 */

	/* 4 iov entries fit into one IO boundary window and each iov entry splits
	 * in 2, so each child IO occupies 8 child iov entries.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
	for (i = 0; i < 4; i++) {
		int iovcnt = i * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 2nd child IO, reaching 16 child iov entries of the parent IO in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
	for (i = 4; i < 8; i++) {
		int iovcnt = (i - 4) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 3rd child IO, reaching 24 child iov entries of the parent IO in total */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
	for (i = 8; i < 12; i++) {
		int iovcnt = (i - 8) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* 4th child IO, reaching all 32 child iov entries of the parent IO */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
	for (i = 12; i < 16; i++) {
		int iovcnt = (i - 12) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
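
	/* Per-iov math: each 4-block (2048-byte) parent iov is capped at 3 * 512
	 * bytes per entry and splits as 1536 + 512, so a 16-block boundary window
	 * of 4 parent iovs costs 8 child iov entries, and 4 such windows exhaust
	 * all 32.
	 */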

	/* The 5th child IO is deferred to the next split round because the child
	 * iov entries are exhausted.
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8);
	for (i = 16; i < 20; i++) {
		int iovcnt = (i - 16) * 2;

		ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
		ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
	}
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* First split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	stub_complete_io(4);
	CU_ASSERT(g_io_done == false);
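
	/* 32 child iov entries / 8 entries per child = 4 child IOs per split round,
	 * hence the 4 IOs outstanding above and the single remaining one below.
	 */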

	/* Second split round */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_split_with_io_wait(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[3];
	struct ut_expected_io *expected_io;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 2;
	bdev_opts.bdev_io_cache_size = 1;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);

	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first read I/O will submit the first child */
	stub_complete_io(1);
	CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Completing the first child will submit the second child */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	/* Complete the second child I/O. This should result in our callback getting
	 * invoked since the parent I/O is now complete.
	 */
	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
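
	/* The wait above comes from bdev_io_pool_size = 2: the initial 1-block read
	 * holds one spdk_bdev_io and the split parent holds the other, so the first
	 * child has to sit on the io_wait_queue until the initial read completes.
	 */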

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	/* The following children will be submitted sequentially due to the capacity of
	 * spdk_bdev_io.
	 */

	/* Completing the first child will submit the second child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the second child will submit the third child */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == false);

	/* Completing the third child will result in our callback getting invoked
	 * since the parent I/O is now complete.
	 */
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
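
/* A minimal sketch (for illustration only, not used by the tests) of the
 * alignment check exercised below: required_alignment is stored as a log2
 * (note the spdk_u32log2() calls), and a buffer passes when the corresponding
 * low bits of its address are clear, which is what _are_iovs_aligned()
 * verifies for every iov.
 */
static inline bool
ut_buf_is_aligned(const void *buf, uint64_t alignment)
{
	/* alignment is assumed to be a power of two here */
	return ((uintptr_t)buf & (alignment - 1)) == 0;
}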

static void
bdev_io_alignment(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;
	void *buf = NULL;
	struct iovec iovs[2];
	int iovcnt;
	uint64_t alignment;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 20;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	fn_table.submit_request = stub_submit_request_get_buf;
	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Create aligned buffer */
	rc = posix_memalign(&buf, 4096, 8192);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Pass aligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	stub_complete_io(1);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));

	/* Pass unaligned single buffer with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	/* Pass unaligned single buffer with 512 alignment required */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
	bdev->required_alignment = spdk_u32log2(alignment);

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 1;
	iovs[0].iov_base = buf;
	iovs[0].iov_len = 512;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with no alignment required */
	alignment = 1;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	/* Pass unaligned iovs with 2048 alignment required */
	alignment = 2048;
	bdev->required_alignment = spdk_u32log2(alignment);

	iovcnt = 2;
	iovs[0].iov_base = buf + 16;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = buf + 16 + 256 + 32;
	iovs[1].iov_len = 256;

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
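
	/* In the 512, 4096 and 2048 cases above, the bdev layer detects the
	 * alignment violation, swaps in an internal bounce buffer (iovs points at
	 * bounce_iov and orig_iovcnt remembers the caller's vector), and restores
	 * the original iovs on completion, which is why orig_iovcnt drops back
	 * to 0.
	 */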
CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov); 3025 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3026 alignment)); 3027 stub_complete_io(1); 3028 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3029 3030 /* Pass iov without allocated buffer without alignment required */ 3031 alignment = 1; 3032 bdev->required_alignment = spdk_u32log2(alignment); 3033 3034 iovcnt = 1; 3035 iovs[0].iov_base = NULL; 3036 iovs[0].iov_len = 0; 3037 3038 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3039 CU_ASSERT(rc == 0); 3040 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3041 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3042 alignment)); 3043 stub_complete_io(1); 3044 3045 /* Pass iov without allocated buffer with 1024 alignment required */ 3046 alignment = 1024; 3047 bdev->required_alignment = spdk_u32log2(alignment); 3048 3049 iovcnt = 1; 3050 iovs[0].iov_base = NULL; 3051 iovs[0].iov_len = 0; 3052 3053 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3054 CU_ASSERT(rc == 0); 3055 CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0); 3056 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3057 alignment)); 3058 stub_complete_io(1); 3059 3060 spdk_put_io_channel(io_ch); 3061 spdk_bdev_close(desc); 3062 free_bdev(bdev); 3063 fn_table.submit_request = stub_submit_request; 3064 spdk_bdev_finish(bdev_fini_cb, NULL); 3065 poll_threads(); 3066 3067 free(buf); 3068 } 3069 3070 static void 3071 bdev_io_alignment_with_boundary(void) 3072 { 3073 struct spdk_bdev *bdev; 3074 struct spdk_bdev_desc *desc = NULL; 3075 struct spdk_io_channel *io_ch; 3076 struct spdk_bdev_opts bdev_opts = {}; 3077 int rc; 3078 void *buf = NULL; 3079 struct iovec iovs[2]; 3080 int iovcnt; 3081 uint64_t alignment; 3082 3083 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3084 bdev_opts.bdev_io_pool_size = 20; 3085 bdev_opts.bdev_io_cache_size = 2; 3086 3087 bdev_opts.opts_size = sizeof(bdev_opts); 3088 rc = spdk_bdev_set_opts(&bdev_opts); 3089 CU_ASSERT(rc == 0); 3090 spdk_bdev_initialize(bdev_init_cb, NULL); 3091 3092 fn_table.submit_request = stub_submit_request_get_buf; 3093 bdev = allocate_bdev("bdev0"); 3094 3095 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3096 CU_ASSERT(rc == 0); 3097 CU_ASSERT(desc != NULL); 3098 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3099 io_ch = spdk_bdev_get_io_channel(desc); 3100 CU_ASSERT(io_ch != NULL); 3101 3102 /* Create aligned buffer */ 3103 rc = posix_memalign(&buf, 4096, 131072); 3104 SPDK_CU_ASSERT_FATAL(rc == 0); 3105 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3106 3107 /* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */ 3108 alignment = 512; 3109 bdev->required_alignment = spdk_u32log2(alignment); 3110 bdev->optimal_io_boundary = 2; 3111 bdev->split_on_optimal_io_boundary = true; 3112 3113 iovcnt = 1; 3114 iovs[0].iov_base = NULL; 3115 iovs[0].iov_len = 512 * 3; 3116 3117 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3118 CU_ASSERT(rc == 0); 3119 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3120 stub_complete_io(2); 3121 3122 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3123 alignment = 512; 3124 bdev->required_alignment = spdk_u32log2(alignment); 3125 bdev->optimal_io_boundary = 16; 3126 bdev->split_on_optimal_io_boundary = true; 3127 3128 iovcnt = 1; 3129 iovs[0].iov_base = NULL; 3130 iovs[0].iov_len 
= 512 * 16; 3131 3132 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3133 CU_ASSERT(rc == 0); 3134 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3135 stub_complete_io(2); 3136 3137 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two children requests */ 3138 alignment = 512; 3139 bdev->required_alignment = spdk_u32log2(alignment); 3140 bdev->optimal_io_boundary = 128; 3141 bdev->split_on_optimal_io_boundary = true; 3142 3143 iovcnt = 1; 3144 iovs[0].iov_base = buf + 16; 3145 iovs[0].iov_len = 512 * 160; 3146 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3147 CU_ASSERT(rc == 0); 3148 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3149 stub_complete_io(2); 3150 3151 /* 512 * 3 with 2 IO boundary */ 3152 alignment = 512; 3153 bdev->required_alignment = spdk_u32log2(alignment); 3154 bdev->optimal_io_boundary = 2; 3155 bdev->split_on_optimal_io_boundary = true; 3156 3157 iovcnt = 2; 3158 iovs[0].iov_base = buf + 16; 3159 iovs[0].iov_len = 512; 3160 iovs[1].iov_base = buf + 16 + 512 + 32; 3161 iovs[1].iov_len = 1024; 3162 3163 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3164 CU_ASSERT(rc == 0); 3165 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3166 stub_complete_io(2); 3167 3168 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3169 CU_ASSERT(rc == 0); 3170 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3171 stub_complete_io(2); 3172 3173 /* 512 * 64 with 32 IO boundary */ 3174 bdev->optimal_io_boundary = 32; 3175 iovcnt = 2; 3176 iovs[0].iov_base = buf + 16; 3177 iovs[0].iov_len = 16384; 3178 iovs[1].iov_base = buf + 16 + 16384 + 32; 3179 iovs[1].iov_len = 16384; 3180 3181 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3182 CU_ASSERT(rc == 0); 3183 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3184 stub_complete_io(3); 3185 3186 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3187 CU_ASSERT(rc == 0); 3188 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3189 stub_complete_io(3); 3190 3191 /* 512 * 160 with 32 IO boundary */ 3192 iovcnt = 1; 3193 iovs[0].iov_base = buf + 16; 3194 iovs[0].iov_len = 16384 + 65536; 3195 3196 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3197 CU_ASSERT(rc == 0); 3198 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3199 stub_complete_io(6); 3200 3201 spdk_put_io_channel(io_ch); 3202 spdk_bdev_close(desc); 3203 free_bdev(bdev); 3204 fn_table.submit_request = stub_submit_request; 3205 spdk_bdev_finish(bdev_fini_cb, NULL); 3206 poll_threads(); 3207 3208 free(buf); 3209 } 3210 3211 static void 3212 histogram_status_cb(void *cb_arg, int status) 3213 { 3214 g_status = status; 3215 } 3216 3217 static void 3218 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3219 { 3220 g_status = status; 3221 g_histogram = histogram; 3222 } 3223 3224 static void 3225 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3226 uint64_t total, uint64_t so_far) 3227 { 3228 g_count += count; 3229 } 3230 3231 static void 3232 bdev_histograms(void) 3233 { 3234 struct spdk_bdev *bdev; 3235 struct spdk_bdev_desc *desc = NULL; 3236 struct spdk_io_channel *ch; 3237 struct spdk_histogram_data *histogram; 3238 uint8_t buf[4096]; 3239 int rc; 3240 3241 spdk_bdev_initialize(bdev_init_cb, NULL); 3242 3243 bdev = allocate_bdev("bdev"); 3244 3245 rc = 
spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3246 CU_ASSERT(rc == 0); 3247 CU_ASSERT(desc != NULL); 3248 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3249 3250 ch = spdk_bdev_get_io_channel(desc); 3251 CU_ASSERT(ch != NULL); 3252 3253 /* Enable histogram */ 3254 g_status = -1; 3255 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3256 poll_threads(); 3257 CU_ASSERT(g_status == 0); 3258 CU_ASSERT(bdev->internal.histogram_enabled == true); 3259 3260 /* Allocate histogram */ 3261 histogram = spdk_histogram_data_alloc(); 3262 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3263 3264 /* Check if histogram is zeroed */ 3265 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3266 poll_threads(); 3267 CU_ASSERT(g_status == 0); 3268 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3269 3270 g_count = 0; 3271 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3272 3273 CU_ASSERT(g_count == 0); 3274 3275 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3276 CU_ASSERT(rc == 0); 3277 3278 spdk_delay_us(10); 3279 stub_complete_io(1); 3280 poll_threads(); 3281 3282 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3283 CU_ASSERT(rc == 0); 3284 3285 spdk_delay_us(10); 3286 stub_complete_io(1); 3287 poll_threads(); 3288 3289 /* Check if histogram gathered data from all I/O channels */ 3290 g_histogram = NULL; 3291 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3292 poll_threads(); 3293 CU_ASSERT(g_status == 0); 3294 CU_ASSERT(bdev->internal.histogram_enabled == true); 3295 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3296 3297 g_count = 0; 3298 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3299 CU_ASSERT(g_count == 2); 3300 3301 /* Disable histogram */ 3302 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3303 poll_threads(); 3304 CU_ASSERT(g_status == 0); 3305 CU_ASSERT(bdev->internal.histogram_enabled == false); 3306 3307 /* Try to run histogram commands on disabled bdev */ 3308 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3309 poll_threads(); 3310 CU_ASSERT(g_status == -EFAULT); 3311 3312 spdk_histogram_data_free(histogram); 3313 spdk_put_io_channel(ch); 3314 spdk_bdev_close(desc); 3315 free_bdev(bdev); 3316 spdk_bdev_finish(bdev_fini_cb, NULL); 3317 poll_threads(); 3318 } 3319 3320 static void 3321 _bdev_compare(bool emulated) 3322 { 3323 struct spdk_bdev *bdev; 3324 struct spdk_bdev_desc *desc = NULL; 3325 struct spdk_io_channel *ioch; 3326 struct ut_expected_io *expected_io; 3327 uint64_t offset, num_blocks; 3328 uint32_t num_completed; 3329 char aa_buf[512]; 3330 char bb_buf[512]; 3331 struct iovec compare_iov; 3332 uint8_t expected_io_type; 3333 int rc; 3334 3335 if (emulated) { 3336 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3337 } else { 3338 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3339 } 3340 3341 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3342 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3343 3344 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3345 3346 spdk_bdev_initialize(bdev_init_cb, NULL); 3347 fn_table.submit_request = stub_submit_request_get_buf; 3348 bdev = allocate_bdev("bdev"); 3349 3350 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3351 CU_ASSERT_EQUAL(rc, 0); 3352 SPDK_CU_ASSERT_FATAL(desc != NULL); 3353 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3354 ioch = spdk_bdev_get_io_channel(desc); 3355 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3356 3357 fn_table.submit_request = 
stub_submit_request_get_buf; 3358 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3359 3360 offset = 50; 3361 num_blocks = 1; 3362 compare_iov.iov_base = aa_buf; 3363 compare_iov.iov_len = sizeof(aa_buf); 3364 3365 /* 1. successful compare */ 3366 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3367 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3368 3369 g_io_done = false; 3370 g_compare_read_buf = aa_buf; 3371 g_compare_read_buf_len = sizeof(aa_buf); 3372 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3373 CU_ASSERT_EQUAL(rc, 0); 3374 num_completed = stub_complete_io(1); 3375 CU_ASSERT_EQUAL(num_completed, 1); 3376 CU_ASSERT(g_io_done == true); 3377 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3378 3379 /* 2. miscompare */ 3380 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3381 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3382 3383 g_io_done = false; 3384 g_compare_read_buf = bb_buf; 3385 g_compare_read_buf_len = sizeof(bb_buf); 3386 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3387 CU_ASSERT_EQUAL(rc, 0); 3388 num_completed = stub_complete_io(1); 3389 CU_ASSERT_EQUAL(num_completed, 1); 3390 CU_ASSERT(g_io_done == true); 3391 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3392 3393 spdk_put_io_channel(ioch); 3394 spdk_bdev_close(desc); 3395 free_bdev(bdev); 3396 fn_table.submit_request = stub_submit_request; 3397 spdk_bdev_finish(bdev_fini_cb, NULL); 3398 poll_threads(); 3399 3400 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3401 3402 g_compare_read_buf = NULL; 3403 } 3404 3405 static void 3406 _bdev_compare_with_md(bool emulated) 3407 { 3408 struct spdk_bdev *bdev; 3409 struct spdk_bdev_desc *desc = NULL; 3410 struct spdk_io_channel *ioch; 3411 struct ut_expected_io *expected_io; 3412 uint64_t offset, num_blocks; 3413 uint32_t num_completed; 3414 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3415 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3416 char buf_miscompare[1024 /* 2 * blocklen */]; 3417 char md_buf[16]; 3418 char md_buf_miscompare[16]; 3419 struct iovec compare_iov; 3420 uint8_t expected_io_type; 3421 int rc; 3422 3423 if (emulated) { 3424 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3425 } else { 3426 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3427 } 3428 3429 memset(buf, 0xaa, sizeof(buf)); 3430 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3431 /* make last md different */ 3432 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3433 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3434 memset(md_buf, 0xaa, 16); 3435 memset(md_buf_miscompare, 0xbb, 16); 3436 3437 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3438 3439 spdk_bdev_initialize(bdev_init_cb, NULL); 3440 fn_table.submit_request = stub_submit_request_get_buf; 3441 bdev = allocate_bdev("bdev"); 3442 3443 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3444 CU_ASSERT_EQUAL(rc, 0); 3445 SPDK_CU_ASSERT_FATAL(desc != NULL); 3446 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3447 ioch = spdk_bdev_get_io_channel(desc); 3448 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3449 3450 fn_table.submit_request = stub_submit_request_get_buf; 3451 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3452 3453 offset = 50; 3454 num_blocks = 2; 3455 3456 /* interleaved md & data */ 3457 
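/* Interleaved layout: every 512-byte data block carries its 8 bytes of metadata inline, so blocklen below accounts for both (512 + 8). */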
bdev->md_interleave = true; 3458 bdev->md_len = 8; 3459 bdev->blocklen = 512 + 8; 3460 compare_iov.iov_base = buf; 3461 compare_iov.iov_len = sizeof(buf); 3462 3463 /* 1. successful compare with md interleaved */ 3464 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3465 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3466 3467 g_io_done = false; 3468 g_compare_read_buf = buf; 3469 g_compare_read_buf_len = sizeof(buf); 3470 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3471 CU_ASSERT_EQUAL(rc, 0); 3472 num_completed = stub_complete_io(1); 3473 CU_ASSERT_EQUAL(num_completed, 1); 3474 CU_ASSERT(g_io_done == true); 3475 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3476 3477 /* 2. miscompare with md interleaved */ 3478 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3479 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3480 3481 g_io_done = false; 3482 g_compare_read_buf = buf_interleaved_miscompare; 3483 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3484 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3485 CU_ASSERT_EQUAL(rc, 0); 3486 num_completed = stub_complete_io(1); 3487 CU_ASSERT_EQUAL(num_completed, 1); 3488 CU_ASSERT(g_io_done == true); 3489 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3490 3491 /* Separate data & md buffers */ 3492 bdev->md_interleave = false; 3493 bdev->blocklen = 512; 3494 compare_iov.iov_base = buf; 3495 compare_iov.iov_len = 1024; 3496 3497 /* 3. successful compare with md separated */ 3498 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3499 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3500 3501 g_io_done = false; 3502 g_compare_read_buf = buf; 3503 g_compare_read_buf_len = 1024; 3504 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3505 g_compare_md_buf = md_buf; 3506 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3507 offset, num_blocks, io_done, NULL); 3508 CU_ASSERT_EQUAL(rc, 0); 3509 num_completed = stub_complete_io(1); 3510 CU_ASSERT_EQUAL(num_completed, 1); 3511 CU_ASSERT(g_io_done == true); 3512 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3513 3514 /* 4. miscompare with md separated where md buf is different */ 3515 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3516 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3517 3518 g_io_done = false; 3519 g_compare_read_buf = buf; 3520 g_compare_read_buf_len = 1024; 3521 g_compare_md_buf = md_buf_miscompare; 3522 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3523 offset, num_blocks, io_done, NULL); 3524 CU_ASSERT_EQUAL(rc, 0); 3525 num_completed = stub_complete_io(1); 3526 CU_ASSERT_EQUAL(num_completed, 1); 3527 CU_ASSERT(g_io_done == true); 3528 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3529 3530 /* 5. 
miscompare with md separated where buf is different */ 3531 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3532 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3533 3534 g_io_done = false; 3535 g_compare_read_buf = buf_miscompare; 3536 g_compare_read_buf_len = sizeof(buf_miscompare); 3537 g_compare_md_buf = md_buf; 3538 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3539 offset, num_blocks, io_done, NULL); 3540 CU_ASSERT_EQUAL(rc, 0); 3541 num_completed = stub_complete_io(1); 3542 CU_ASSERT_EQUAL(num_completed, 1); 3543 CU_ASSERT(g_io_done == true); 3544 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3545 3546 bdev->md_len = 0; 3547 g_compare_md_buf = NULL; 3548 3549 spdk_put_io_channel(ioch); 3550 spdk_bdev_close(desc); 3551 free_bdev(bdev); 3552 fn_table.submit_request = stub_submit_request; 3553 spdk_bdev_finish(bdev_fini_cb, NULL); 3554 poll_threads(); 3555 3556 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3557 3558 g_compare_read_buf = NULL; 3559 } 3560 3561 static void 3562 bdev_compare(void) 3563 { 3564 _bdev_compare(false); 3565 _bdev_compare_with_md(false); 3566 } 3567 3568 static void 3569 bdev_compare_emulated(void) 3570 { 3571 _bdev_compare(true); 3572 _bdev_compare_with_md(true); 3573 } 3574 3575 static void 3576 bdev_compare_and_write(void) 3577 { 3578 struct spdk_bdev *bdev; 3579 struct spdk_bdev_desc *desc = NULL; 3580 struct spdk_io_channel *ioch; 3581 struct ut_expected_io *expected_io; 3582 uint64_t offset, num_blocks; 3583 uint32_t num_completed; 3584 char aa_buf[512]; 3585 char bb_buf[512]; 3586 char cc_buf[512]; 3587 char write_buf[512]; 3588 struct iovec compare_iov; 3589 struct iovec write_iov; 3590 int rc; 3591 3592 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3593 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3594 memset(cc_buf, 0xcc, sizeof(cc_buf)); 3595 3596 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 3597 3598 spdk_bdev_initialize(bdev_init_cb, NULL); 3599 fn_table.submit_request = stub_submit_request_get_buf; 3600 bdev = allocate_bdev("bdev"); 3601 3602 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3603 CU_ASSERT_EQUAL(rc, 0); 3604 SPDK_CU_ASSERT_FATAL(desc != NULL); 3605 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3606 ioch = spdk_bdev_get_io_channel(desc); 3607 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3608 3609 fn_table.submit_request = stub_submit_request_get_buf; 3610 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3611 3612 offset = 50; 3613 num_blocks = 1; 3614 compare_iov.iov_base = aa_buf; 3615 compare_iov.iov_len = sizeof(aa_buf); 3616 write_iov.iov_base = bb_buf; 3617 write_iov.iov_len = sizeof(bb_buf); 3618 3619 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3620 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3621 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 3622 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3623 3624 g_io_done = false; 3625 g_compare_read_buf = aa_buf; 3626 g_compare_read_buf_len = sizeof(aa_buf); 3627 memset(write_buf, 0, sizeof(write_buf)); 3628 g_compare_write_buf = write_buf; 3629 g_compare_write_buf_len = sizeof(write_buf); 3630 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3631 offset, num_blocks, io_done, NULL); 3632 /* Trigger range locking */ 3633 poll_threads(); 3634 CU_ASSERT_EQUAL(rc, 0); 3635 num_completed = stub_complete_io(1); 3636 
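/* The first completion above finishes only the compare (read) phase of the compare-and-write; the parent I/O stays outstanding until the write phase below completes as well. */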
CU_ASSERT_EQUAL(num_completed, 1); 3637 CU_ASSERT(g_io_done == false); 3638 num_completed = stub_complete_io(1); 3639 /* Trigger range unlocking */ 3640 poll_threads(); 3641 CU_ASSERT_EQUAL(num_completed, 1); 3642 CU_ASSERT(g_io_done == true); 3643 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3644 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 3645 3646 /* Test miscompare */ 3647 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 3648 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3649 3650 g_io_done = false; 3651 g_compare_read_buf = cc_buf; 3652 g_compare_read_buf_len = sizeof(cc_buf); 3653 memset(write_buf, 0, sizeof(write_buf)); 3654 g_compare_write_buf = write_buf; 3655 g_compare_write_buf_len = sizeof(write_buf); 3656 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 3657 offset, num_blocks, io_done, NULL); 3658 /* Trigger range locking */ 3659 poll_threads(); 3660 CU_ASSERT_EQUAL(rc, 0); 3661 num_completed = stub_complete_io(1); 3662 /* Trigger range unlocking earlier because we expect error here */ 3663 poll_threads(); 3664 CU_ASSERT_EQUAL(num_completed, 1); 3665 CU_ASSERT(g_io_done == true); 3666 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3667 num_completed = stub_complete_io(1); 3668 CU_ASSERT_EQUAL(num_completed, 0); 3669 3670 spdk_put_io_channel(ioch); 3671 spdk_bdev_close(desc); 3672 free_bdev(bdev); 3673 fn_table.submit_request = stub_submit_request; 3674 spdk_bdev_finish(bdev_fini_cb, NULL); 3675 poll_threads(); 3676 3677 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3678 3679 g_compare_read_buf = NULL; 3680 g_compare_write_buf = NULL; 3681 } 3682 3683 static void 3684 bdev_write_zeroes(void) 3685 { 3686 struct spdk_bdev *bdev; 3687 struct spdk_bdev_desc *desc = NULL; 3688 struct spdk_io_channel *ioch; 3689 struct ut_expected_io *expected_io; 3690 uint64_t offset, num_io_blocks, num_blocks; 3691 uint32_t num_completed, num_requests; 3692 int rc; 3693 3694 spdk_bdev_initialize(bdev_init_cb, NULL); 3695 bdev = allocate_bdev("bdev"); 3696 3697 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3698 CU_ASSERT_EQUAL(rc, 0); 3699 SPDK_CU_ASSERT_FATAL(desc != NULL); 3700 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3701 ioch = spdk_bdev_get_io_channel(desc); 3702 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3703 3704 fn_table.submit_request = stub_submit_request; 3705 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3706 3707 /* First test that if the bdev supports write_zeroes, the request won't be split */ 3708 bdev->md_len = 0; 3709 bdev->blocklen = 4096; 3710 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3711 3712 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 3713 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3714 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3715 CU_ASSERT_EQUAL(rc, 0); 3716 num_completed = stub_complete_io(1); 3717 CU_ASSERT_EQUAL(num_completed, 1); 3718 3719 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 3720 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 3721 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 3722 num_requests = 2; 3723 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 3724 3725 for (offset = 0; offset < num_requests; ++offset) { 3726 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3727 offset * num_io_blocks, 
num_io_blocks, 0); 3728 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3729 } 3730 3731 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3732 CU_ASSERT_EQUAL(rc, 0); 3733 num_completed = stub_complete_io(num_requests); 3734 CU_ASSERT_EQUAL(num_completed, num_requests); 3735 3736 /* Check that the splitting is correct if bdev has interleaved metadata */ 3737 bdev->md_interleave = true; 3738 bdev->md_len = 64; 3739 bdev->blocklen = 4096 + 64; 3740 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 3741 3742 num_requests = offset = 0; 3743 while (offset < num_blocks) { 3744 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 3745 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3746 offset, num_io_blocks, 0); 3747 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3748 offset += num_io_blocks; 3749 num_requests++; 3750 } 3751 3752 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3753 CU_ASSERT_EQUAL(rc, 0); 3754 num_completed = stub_complete_io(num_requests); 3755 CU_ASSERT_EQUAL(num_completed, num_requests); 3756 num_completed = stub_complete_io(num_requests); 3757 CU_ASSERT_EQUAL(num_completed, 0); 3758 3759 /* Check the same for a separate metadata buffer */ 3760 bdev->md_interleave = false; 3761 bdev->md_len = 64; 3762 bdev->blocklen = 4096; 3763 3764 num_requests = offset = 0; 3765 while (offset < num_blocks) { 3766 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks); 3767 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 3768 offset, num_io_blocks, 0); 3769 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 3770 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3771 offset += num_io_blocks; 3772 num_requests++; 3773 } 3774 3775 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 3776 CU_ASSERT_EQUAL(rc, 0); 3777 num_completed = stub_complete_io(num_requests); 3778 CU_ASSERT_EQUAL(num_completed, num_requests); 3779 3780 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 3781 spdk_put_io_channel(ioch); 3782 spdk_bdev_close(desc); 3783 free_bdev(bdev); 3784 spdk_bdev_finish(bdev_fini_cb, NULL); 3785 poll_threads(); 3786 } 3787 3788 static void 3789 bdev_zcopy_write(void) 3790 { 3791 struct spdk_bdev *bdev; 3792 struct spdk_bdev_desc *desc = NULL; 3793 struct spdk_io_channel *ioch; 3794 struct ut_expected_io *expected_io; 3795 uint64_t offset, num_blocks; 3796 uint32_t num_completed; 3797 char aa_buf[512]; 3798 struct iovec iov; 3799 int rc; 3800 const bool populate = false; 3801 const bool commit = true; 3802 3803 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3804 3805 spdk_bdev_initialize(bdev_init_cb, NULL); 3806 bdev = allocate_bdev("bdev"); 3807 3808 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3809 CU_ASSERT_EQUAL(rc, 0); 3810 SPDK_CU_ASSERT_FATAL(desc != NULL); 3811 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3812 ioch = spdk_bdev_get_io_channel(desc); 3813 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3814 3815 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3816 3817 offset = 50; 3818 num_blocks = 1; 3819 iov.iov_base = NULL; 3820 iov.iov_len = 0; 3821 3822 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 3823 g_zcopy_read_buf_len = (uint32_t) -1;
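/* Poison the read-side zcopy globals so we can verify below that the write path never touches them. */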
3824 /* Do a zcopy start for a write (populate=false) */ 3825 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3826 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3827 g_io_done = false; 3828 g_zcopy_write_buf = aa_buf; 3829 g_zcopy_write_buf_len = sizeof(aa_buf); 3830 g_zcopy_bdev_io = NULL; 3831 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3832 CU_ASSERT_EQUAL(rc, 0); 3833 num_completed = stub_complete_io(1); 3834 CU_ASSERT_EQUAL(num_completed, 1); 3835 CU_ASSERT(g_io_done == true); 3836 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3837 /* Check that the iov has been set up */ 3838 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 3839 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 3840 /* Check that the bdev_io has been saved */ 3841 CU_ASSERT(g_zcopy_bdev_io != NULL); 3842 /* Now do the zcopy end for a write (commit=true) */ 3843 g_io_done = false; 3844 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3845 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3846 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3847 CU_ASSERT_EQUAL(rc, 0); 3848 num_completed = stub_complete_io(1); 3849 CU_ASSERT_EQUAL(num_completed, 1); 3850 CU_ASSERT(g_io_done == true); 3851 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3852 /* Check that the g_zcopy variables are reset by io_done */ 3853 CU_ASSERT(g_zcopy_write_buf == NULL); 3854 CU_ASSERT(g_zcopy_write_buf_len == 0); 3855 /* Check that io_done has freed the g_zcopy_bdev_io */ 3856 CU_ASSERT(g_zcopy_bdev_io == NULL); 3857 3858 /* Check that the zcopy read buffer has not been touched, which 3859 * ensures that the correct buffers were used. 3860 */ 3861 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 3862 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 3863 3864 spdk_put_io_channel(ioch); 3865 spdk_bdev_close(desc); 3866 free_bdev(bdev); 3867 spdk_bdev_finish(bdev_fini_cb, NULL); 3868 poll_threads(); 3869 } 3870
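/* bdev_zcopy_read mirrors bdev_zcopy_write: zcopy start with populate=true asks the backend to expose its buffer for reading, and zcopy end with commit=false releases it without writing anything back. */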
3871 static void 3872 bdev_zcopy_read(void) 3873 { 3874 struct spdk_bdev *bdev; 3875 struct spdk_bdev_desc *desc = NULL; 3876 struct spdk_io_channel *ioch; 3877 struct ut_expected_io *expected_io; 3878 uint64_t offset, num_blocks; 3879 uint32_t num_completed; 3880 char aa_buf[512]; 3881 struct iovec iov; 3882 int rc; 3883 const bool populate = true; 3884 const bool commit = false; 3885 3886 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3887 3888 spdk_bdev_initialize(bdev_init_cb, NULL); 3889 bdev = allocate_bdev("bdev"); 3890 3891 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3892 CU_ASSERT_EQUAL(rc, 0); 3893 SPDK_CU_ASSERT_FATAL(desc != NULL); 3894 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3895 ioch = spdk_bdev_get_io_channel(desc); 3896 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3897 3898 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3899 3900 offset = 50; 3901 num_blocks = 1; 3902 iov.iov_base = NULL; 3903 iov.iov_len = 0; 3904 3905 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 3906 g_zcopy_write_buf_len = (uint32_t) -1; 3907 3908 /* Do a zcopy start for a read (populate=true) */ 3909 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3910 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3911 g_io_done = false; 3912 g_zcopy_read_buf = aa_buf; 3913 g_zcopy_read_buf_len = sizeof(aa_buf); 3914 g_zcopy_bdev_io = NULL; 3915 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 3916 CU_ASSERT_EQUAL(rc, 0); 3917 num_completed = stub_complete_io(1); 3918 CU_ASSERT_EQUAL(num_completed, 1); 3919 CU_ASSERT(g_io_done == true); 3920 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3921 /* Check that the iov has been set up */ 3922 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 3923 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 3924 /* Check that the bdev_io has been saved */ 3925 CU_ASSERT(g_zcopy_bdev_io != NULL); 3926 3927 /* Now do the zcopy end for a read (commit=false) */ 3928 g_io_done = false; 3929 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 3930 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3931 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 3932 CU_ASSERT_EQUAL(rc, 0); 3933 num_completed = stub_complete_io(1); 3934 CU_ASSERT_EQUAL(num_completed, 1); 3935 CU_ASSERT(g_io_done == true); 3936 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3937 /* Check that the g_zcopy variables are reset by io_done */ 3938 CU_ASSERT(g_zcopy_read_buf == NULL); 3939 CU_ASSERT(g_zcopy_read_buf_len == 0); 3940 /* Check that io_done has freed the g_zcopy_bdev_io */ 3941 CU_ASSERT(g_zcopy_bdev_io == NULL); 3942 3943 /* Check that the zcopy write buffer has not been touched, which 3944 * ensures that the correct buffers were used. 3945 */ 3946 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 3947 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 3948 3949 spdk_put_io_channel(ioch); 3950 spdk_bdev_close(desc); 3951 free_bdev(bdev); 3952 spdk_bdev_finish(bdev_fini_cb, NULL); 3953 poll_threads(); 3954 } 3955 3956 static void 3957 bdev_open_while_hotremove(void) 3958 { 3959 struct spdk_bdev *bdev; 3960 struct spdk_bdev_desc *desc[2] = {}; 3961 int rc; 3962 3963 bdev = allocate_bdev("bdev"); 3964 3965 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 3966 CU_ASSERT(rc == 0); 3967 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 3968 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 3969 3970 spdk_bdev_unregister(bdev, NULL, NULL); 3971 /* Bdev unregister is handled asynchronously. Poll thread to complete.
*/ 3972 poll_threads(); 3973 3974 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 3975 CU_ASSERT(rc == -ENODEV); 3976 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 3977 3978 spdk_bdev_close(desc[0]); 3979 free_bdev(bdev); 3980 } 3981 3982 static void 3983 bdev_close_while_hotremove(void) 3984 { 3985 struct spdk_bdev *bdev; 3986 struct spdk_bdev_desc *desc = NULL; 3987 int rc = 0; 3988 3989 bdev = allocate_bdev("bdev"); 3990 3991 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 3992 CU_ASSERT_EQUAL(rc, 0); 3993 SPDK_CU_ASSERT_FATAL(desc != NULL); 3994 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3995 3996 /* Simulate hot-unplug by unregistering bdev */ 3997 g_event_type1 = 0xFF; 3998 g_unregister_arg = NULL; 3999 g_unregister_rc = -1; 4000 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4001 /* Close device while remove event is in flight */ 4002 spdk_bdev_close(desc); 4003 4004 /* Ensure that unregister callback is delayed */ 4005 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4006 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4007 4008 poll_threads(); 4009 4010 /* Event callback shall not be issued because device was closed */ 4011 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4012 /* Unregister callback is issued */ 4013 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4014 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4015 4016 free_bdev(bdev); 4017 } 4018 4019 static void 4020 bdev_open_ext(void) 4021 { 4022 struct spdk_bdev *bdev; 4023 struct spdk_bdev_desc *desc1 = NULL; 4024 struct spdk_bdev_desc *desc2 = NULL; 4025 int rc = 0; 4026 4027 bdev = allocate_bdev("bdev"); 4028 4029 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4030 CU_ASSERT_EQUAL(rc, -EINVAL); 4031 4032 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4033 CU_ASSERT_EQUAL(rc, 0); 4034 4035 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4036 CU_ASSERT_EQUAL(rc, 0); 4037 4038 g_event_type1 = 0xFF; 4039 g_event_type2 = 0xFF; 4040 4041 /* Simulate hot-unplug by unregistering bdev */ 4042 spdk_bdev_unregister(bdev, NULL, NULL); 4043 poll_threads(); 4044 4045 /* Check if correct events have been triggered in event callback fn */ 4046 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4047 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4048 4049 free_bdev(bdev); 4050 poll_threads(); 4051 } 4052 4053 static void 4054 bdev_open_ext_unregister(void) 4055 { 4056 struct spdk_bdev *bdev; 4057 struct spdk_bdev_desc *desc1 = NULL; 4058 struct spdk_bdev_desc *desc2 = NULL; 4059 struct spdk_bdev_desc *desc3 = NULL; 4060 struct spdk_bdev_desc *desc4 = NULL; 4061 int rc = 0; 4062 4063 bdev = allocate_bdev("bdev"); 4064 4065 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4066 CU_ASSERT_EQUAL(rc, -EINVAL); 4067 4068 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4069 CU_ASSERT_EQUAL(rc, 0); 4070 4071 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4072 CU_ASSERT_EQUAL(rc, 0); 4073 4074 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4075 CU_ASSERT_EQUAL(rc, 0); 4076 4077 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4078 CU_ASSERT_EQUAL(rc, 0); 4079 4080 g_event_type1 = 0xFF; 4081 g_event_type2 = 0xFF; 4082 g_event_type3 = 0xFF; 4083 g_event_type4 = 0xFF; 4084 4085 g_unregister_arg = NULL; 4086 g_unregister_rc = -1; 4087 4088 /* Simulate hot-unplug by unregistering bdev */ 4089 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void 
*)0x12345678); 4090 4091 /* 4092 * Unregister is handled asynchronously and event callback 4093 * (i.e., above bdev_open_cbN) will be called. 4094 * For bdev_open_cb3 and bdev_open_cb4, it is intended to not 4095 * close the desc3 and desc4 so that the bdev is not closed. 4096 */ 4097 poll_threads(); 4098 4099 /* Check if correct events have been triggered in event callback fn */ 4100 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4101 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4102 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4103 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4104 4105 /* Check that unregister callback is delayed */ 4106 CU_ASSERT(g_unregister_arg == NULL); 4107 CU_ASSERT(g_unregister_rc == -1); 4108 4109 /* 4110 * Explicitly close desc3. As desc4 is still open, the 4111 * unregister callback is still delayed to execute. 4112 */ 4113 spdk_bdev_close(desc3); 4114 CU_ASSERT(g_unregister_arg == NULL); 4115 CU_ASSERT(g_unregister_rc == -1); 4116 4117 /* 4118 * Explicitly close desc4 to trigger the ongoing bdev unregister 4119 * operation after the last desc is closed. 4120 */ 4121 spdk_bdev_close(desc4); 4122 4123 /* Poll the thread for the async unregister operation */ 4124 poll_threads(); 4125 4126 /* Check that unregister callback is executed */ 4127 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4128 CU_ASSERT(g_unregister_rc == 0); 4129 4130 free_bdev(bdev); 4131 poll_threads(); 4132 } 4133 4134 struct timeout_io_cb_arg { 4135 struct iovec iov; 4136 uint8_t type; 4137 }; 4138 4139 static int 4140 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4141 { 4142 struct spdk_bdev_io *bdev_io; 4143 int n = 0; 4144 4145 if (!ch) { 4146 return -1; 4147 } 4148 4149 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4150 n++; 4151 } 4152 4153 return n; 4154 } 4155 4156 static void 4157 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4158 { 4159 struct timeout_io_cb_arg *ctx = cb_arg; 4160 4161 ctx->type = bdev_io->type; 4162 ctx->iov.iov_base = bdev_io->iov.iov_base; 4163 ctx->iov.iov_len = bdev_io->iov.iov_len; 4164 } 4165 4166 static void 4167 bdev_set_io_timeout(void) 4168 { 4169 struct spdk_bdev *bdev; 4170 struct spdk_bdev_desc *desc = NULL; 4171 struct spdk_io_channel *io_ch = NULL; 4172 struct spdk_bdev_channel *bdev_ch = NULL; 4173 struct timeout_io_cb_arg cb_arg; 4174 4175 spdk_bdev_initialize(bdev_init_cb, NULL); 4176 4177 bdev = allocate_bdev("bdev"); 4178 4179 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4180 SPDK_CU_ASSERT_FATAL(desc != NULL); 4181 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4182 4183 io_ch = spdk_bdev_get_io_channel(desc); 4184 CU_ASSERT(io_ch != NULL); 4185 4186 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4187 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4188
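/* The test runs in three parts: part 1 checks I/O tracking in bdev_ch->io_submitted, part 2 checks timeout poller registration on the desc, and part 3 checks that timed-out I/O are reported through the callback. */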
4189 /* This is part 1. 4190 * We will check the bdev_ch->io_submitted list 4191 * to make sure that it can link IOs, and only the user-submitted IOs 4192 */ 4193 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4194 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4195 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4196 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4197 stub_complete_io(1); 4198 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4199 stub_complete_io(1); 4200 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4201 4202 /* Split IO */ 4203 bdev->optimal_io_boundary = 16; 4204 bdev->split_on_optimal_io_boundary = true; 4205 4206 /* Now test that a single-vector command is split correctly. 4207 * Offset 14, length 8, payload 0xF000 4208 * Child - Offset 14, length 2, payload 0xF000 4209 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4210 * 4211 * Set up the expected values before calling spdk_bdev_read_blocks 4212 */ 4213 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4214 /* We count all submitted IOs including IO that are generated by splitting. */ 4215 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4216 stub_complete_io(1); 4217 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4218 stub_complete_io(1); 4219 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4220 4221 /* Also include the reset IO */ 4222 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4223 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4224 poll_threads(); 4225 stub_complete_io(1); 4226 poll_threads(); 4227 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4228 4229 /* This is part 2. 4230 * Test the desc timeout poller registration. 4231 */ 4232 4233 /* Successfully set the timeout */ 4234 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4235 CU_ASSERT(desc->io_timeout_poller != NULL); 4236 CU_ASSERT(desc->timeout_in_sec == 30); 4237 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4238 CU_ASSERT(desc->cb_arg == &cb_arg); 4239 4240 /* Change the timeout limit */ 4241 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4242 CU_ASSERT(desc->io_timeout_poller != NULL); 4243 CU_ASSERT(desc->timeout_in_sec == 20); 4244 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4245 CU_ASSERT(desc->cb_arg == &cb_arg); 4246 4247 /* Disable the timeout */ 4248 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4249 CU_ASSERT(desc->io_timeout_poller == NULL); 4250
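/* Note: in this unit-test environment spdk_delay_us() only advances the mocked tick counter; poll_threads() then runs the timeout poller against that simulated clock, so no real waiting occurs. */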
4251 /* This is part 3. 4252 * We will test catching timed-out IO and check whether the IO is 4253 * the submitted one. 4254 */ 4255 memset(&cb_arg, 0, sizeof(cb_arg)); 4256 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4257 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4258 4259 /* Don't reach the limit */ 4260 spdk_delay_us(15 * spdk_get_ticks_hz()); 4261 poll_threads(); 4262 CU_ASSERT(cb_arg.type == 0); 4263 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4264 CU_ASSERT(cb_arg.iov.iov_len == 0); 4265 4266 /* 15 + 15 = 30 reaches the limit */ 4267 spdk_delay_us(15 * spdk_get_ticks_hz()); 4268 poll_threads(); 4269 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4270 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4271 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4272 stub_complete_io(1); 4273 4274 /* Use the same split IO above and check the IO */ 4275 memset(&cb_arg, 0, sizeof(cb_arg)); 4276 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4277 4278 /* The first child completes in time */ 4279 spdk_delay_us(15 * spdk_get_ticks_hz()); 4280 poll_threads(); 4281 stub_complete_io(1); 4282 CU_ASSERT(cb_arg.type == 0); 4283 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4284 CU_ASSERT(cb_arg.iov.iov_len == 0); 4285 4286 /* The second child reaches the limit */ 4287 spdk_delay_us(15 * spdk_get_ticks_hz()); 4288 poll_threads(); 4289 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4290 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4291 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4292 stub_complete_io(1); 4293 4294 /* Also include the reset IO */ 4295 memset(&cb_arg, 0, sizeof(cb_arg)); 4296 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4297 spdk_delay_us(30 * spdk_get_ticks_hz()); 4298 poll_threads(); 4299 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4300 stub_complete_io(1); 4301 poll_threads(); 4302 4303 spdk_put_io_channel(io_ch); 4304 spdk_bdev_close(desc); 4305 free_bdev(bdev); 4306 spdk_bdev_finish(bdev_fini_cb, NULL); 4307 poll_threads(); 4308 } 4309 4310 static void 4311 bdev_set_qd_sampling(void) 4312 { 4313 struct spdk_bdev *bdev; 4314 struct spdk_bdev_desc *desc = NULL; 4315 struct spdk_io_channel *io_ch = NULL; 4316 struct spdk_bdev_channel *bdev_ch = NULL; 4317 struct timeout_io_cb_arg cb_arg; 4318 4319 spdk_bdev_initialize(bdev_init_cb, NULL); 4320 4321 bdev = allocate_bdev("bdev"); 4322 4323 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4324 SPDK_CU_ASSERT_FATAL(desc != NULL); 4325 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4326 4327 io_ch = spdk_bdev_get_io_channel(desc); 4328 CU_ASSERT(io_ch != NULL); 4329 4330 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4331 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4332 4333 /* This is part 1. 4334 * We will check the bdev_ch->io_submitted list 4335 * to make sure that it can link IOs, and only the user-submitted IOs 4336 */ 4337 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4338 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4339 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4340 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4341 stub_complete_io(1); 4342 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4343 stub_complete_io(1); 4344 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4345
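/* Queue-depth sampling is driven by a poller: a new period is staged in internal.new_period and only takes effect as internal.period once the in-progress sampling cycle finishes, which the asserts below verify. */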
4346 /* This is part 2. 4347 * Test the bdev's qd poller registration. 4348 */ 4349 /* 1st Successfully set the qd sampling period */ 4350 spdk_bdev_set_qd_sampling_period(bdev, 10); 4351 CU_ASSERT(bdev->internal.new_period == 10); 4352 CU_ASSERT(bdev->internal.period == 10); 4353 CU_ASSERT(bdev->internal.qd_desc != NULL); 4354 poll_threads(); 4355 CU_ASSERT(bdev->internal.qd_poller != NULL); 4356 4357 /* 2nd Change the qd sampling period */ 4358 spdk_bdev_set_qd_sampling_period(bdev, 20); 4359 CU_ASSERT(bdev->internal.new_period == 20); 4360 CU_ASSERT(bdev->internal.period == 10); 4361 CU_ASSERT(bdev->internal.qd_desc != NULL); 4362 poll_threads(); 4363 CU_ASSERT(bdev->internal.qd_poller != NULL); 4364 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4365 4366 /* 3rd Change the qd sampling period and verify qd_poll_in_progress */ 4367 spdk_delay_us(20); 4368 poll_thread_times(0, 1); 4369 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4370 spdk_bdev_set_qd_sampling_period(bdev, 30); 4371 CU_ASSERT(bdev->internal.new_period == 30); 4372 CU_ASSERT(bdev->internal.period == 20); 4373 poll_threads(); 4374 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4375 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4376 4377 /* 4th Disable the qd sampling period */ 4378 spdk_bdev_set_qd_sampling_period(bdev, 0); 4379 CU_ASSERT(bdev->internal.new_period == 0); 4380 CU_ASSERT(bdev->internal.period == 30); 4381 poll_threads(); 4382 CU_ASSERT(bdev->internal.qd_poller == NULL); 4383 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4384 CU_ASSERT(bdev->internal.qd_desc == NULL); 4385 4386 /* This is part 3. 4387 * We will test that the submitted IO and reset work 4388 * properly with the qd sampling. 4389 */ 4390 memset(&cb_arg, 0, sizeof(cb_arg)); 4391 spdk_bdev_set_qd_sampling_period(bdev, 1); 4392 poll_threads(); 4393 4394 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4395 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4396 4397 /* Also include the reset IO */ 4398 memset(&cb_arg, 0, sizeof(cb_arg)); 4399 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4400 poll_threads(); 4401 4402 /* Close the desc */ 4403 spdk_put_io_channel(io_ch); 4404 spdk_bdev_close(desc); 4405 4406 /* Complete the submitted IO and reset */ 4407 stub_complete_io(2); 4408 poll_threads(); 4409 4410 free_bdev(bdev); 4411 spdk_bdev_finish(bdev_fini_cb, NULL); 4412 poll_threads(); 4413 } 4414 4415 static void 4416 lba_range_overlap(void) 4417 { 4418 struct lba_range r1, r2; 4419 4420 r1.offset = 100; 4421 r1.length = 50; 4422 4423 r2.offset = 0; 4424 r2.length = 1; 4425 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4426 4427 r2.offset = 0; 4428 r2.length = 100; 4429 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4430 4431 r2.offset = 0; 4432 r2.length = 110; 4433 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4434 4435 r2.offset = 100; 4436 r2.length = 10; 4437 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4438 4439 r2.offset = 110; 4440 r2.length = 20; 4441 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4442 4443 r2.offset = 140; 4444 r2.length = 150; 4445 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4446 4447 r2.offset = 130; 4448 r2.length = 200; 4449 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4450 4451 r2.offset = 150; 4452 r2.length = 100; 4453 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4454 4455 r2.offset = 110; 4456 r2.length = 0; 4457 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4458 } 4459 4460 static bool
g_lock_lba_range_done; 4461 static bool g_unlock_lba_range_done; 4462 4463 static void 4464 lock_lba_range_done(void *ctx, int status) 4465 { 4466 g_lock_lba_range_done = true; 4467 } 4468 4469 static void 4470 unlock_lba_range_done(void *ctx, int status) 4471 { 4472 g_unlock_lba_range_done = true; 4473 } 4474 4475 static void 4476 lock_lba_range_check_ranges(void) 4477 { 4478 struct spdk_bdev *bdev; 4479 struct spdk_bdev_desc *desc = NULL; 4480 struct spdk_io_channel *io_ch; 4481 struct spdk_bdev_channel *channel; 4482 struct lba_range *range; 4483 int ctx1; 4484 int rc; 4485 4486 spdk_bdev_initialize(bdev_init_cb, NULL); 4487 4488 bdev = allocate_bdev("bdev0"); 4489 4490 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4491 CU_ASSERT(rc == 0); 4492 CU_ASSERT(desc != NULL); 4493 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4494 io_ch = spdk_bdev_get_io_channel(desc); 4495 CU_ASSERT(io_ch != NULL); 4496 channel = spdk_io_channel_get_ctx(io_ch); 4497 4498 g_lock_lba_range_done = false; 4499 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4500 CU_ASSERT(rc == 0); 4501 poll_threads(); 4502 4503 CU_ASSERT(g_lock_lba_range_done == true); 4504 range = TAILQ_FIRST(&channel->locked_ranges); 4505 SPDK_CU_ASSERT_FATAL(range != NULL); 4506 CU_ASSERT(range->offset == 20); 4507 CU_ASSERT(range->length == 10); 4508 CU_ASSERT(range->owner_ch == channel); 4509 4510 /* Unlocks must exactly match a lock. */ 4511 g_unlock_lba_range_done = false; 4512 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4513 CU_ASSERT(rc == -EINVAL); 4514 CU_ASSERT(g_unlock_lba_range_done == false); 4515 4516 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4517 CU_ASSERT(rc == 0); 4518 spdk_delay_us(100); 4519 poll_threads(); 4520 4521 CU_ASSERT(g_unlock_lba_range_done == true); 4522 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4523 4524 spdk_put_io_channel(io_ch); 4525 spdk_bdev_close(desc); 4526 free_bdev(bdev); 4527 spdk_bdev_finish(bdev_fini_cb, NULL); 4528 poll_threads(); 4529 } 4530 4531 static void 4532 lock_lba_range_with_io_outstanding(void) 4533 { 4534 struct spdk_bdev *bdev; 4535 struct spdk_bdev_desc *desc = NULL; 4536 struct spdk_io_channel *io_ch; 4537 struct spdk_bdev_channel *channel; 4538 struct lba_range *range; 4539 char buf[4096]; 4540 int ctx1; 4541 int rc; 4542 4543 spdk_bdev_initialize(bdev_init_cb, NULL); 4544 4545 bdev = allocate_bdev("bdev0"); 4546 4547 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4548 CU_ASSERT(rc == 0); 4549 CU_ASSERT(desc != NULL); 4550 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4551 io_ch = spdk_bdev_get_io_channel(desc); 4552 CU_ASSERT(io_ch != NULL); 4553 channel = spdk_io_channel_get_ctx(io_ch); 4554 4555 g_io_done = false; 4556 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4557 CU_ASSERT(rc == 0); 4558 4559 g_lock_lba_range_done = false; 4560 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4561 CU_ASSERT(rc == 0); 4562 poll_threads(); 4563 4564 /* The lock should immediately become valid, since there are no outstanding 4565 * write I/O. 
4566 */ 4567 CU_ASSERT(g_io_done == false); 4568 CU_ASSERT(g_lock_lba_range_done == true); 4569 range = TAILQ_FIRST(&channel->locked_ranges); 4570 SPDK_CU_ASSERT_FATAL(range != NULL); 4571 CU_ASSERT(range->offset == 20); 4572 CU_ASSERT(range->length == 10); 4573 CU_ASSERT(range->owner_ch == channel); 4574 CU_ASSERT(range->locked_ctx == &ctx1); 4575 4576 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4577 CU_ASSERT(rc == 0); 4578 stub_complete_io(1); 4579 spdk_delay_us(100); 4580 poll_threads(); 4581 4582 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4583 4584 /* Now try again, but with a write I/O. */ 4585 g_io_done = false; 4586 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4587 CU_ASSERT(rc == 0); 4588 4589 g_lock_lba_range_done = false; 4590 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4591 CU_ASSERT(rc == 0); 4592 poll_threads(); 4593 4594 /* The lock should not be fully valid yet, since a write I/O is outstanding. 4595 * But note that the range should be on the channel's locked_list, to make sure no 4596 * new write I/O are started. 4597 */ 4598 CU_ASSERT(g_io_done == false); 4599 CU_ASSERT(g_lock_lba_range_done == false); 4600 range = TAILQ_FIRST(&channel->locked_ranges); 4601 SPDK_CU_ASSERT_FATAL(range != NULL); 4602 CU_ASSERT(range->offset == 20); 4603 CU_ASSERT(range->length == 10); 4604 4605 /* Complete the write I/O. This should make the lock valid (checked by confirming 4606 * our callback was invoked). 4607 */ 4608 stub_complete_io(1); 4609 spdk_delay_us(100); 4610 poll_threads(); 4611 CU_ASSERT(g_io_done == true); 4612 CU_ASSERT(g_lock_lba_range_done == true); 4613 4614 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4615 CU_ASSERT(rc == 0); 4616 poll_threads(); 4617 4618 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4619 4620 spdk_put_io_channel(io_ch); 4621 spdk_bdev_close(desc); 4622 free_bdev(bdev); 4623 spdk_bdev_finish(bdev_fini_cb, NULL); 4624 poll_threads(); 4625 } 4626 4627 static void 4628 lock_lba_range_overlapped(void) 4629 { 4630 struct spdk_bdev *bdev; 4631 struct spdk_bdev_desc *desc = NULL; 4632 struct spdk_io_channel *io_ch; 4633 struct spdk_bdev_channel *channel; 4634 struct lba_range *range; 4635 int ctx1; 4636 int rc; 4637 4638 spdk_bdev_initialize(bdev_init_cb, NULL); 4639 4640 bdev = allocate_bdev("bdev0"); 4641 4642 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4643 CU_ASSERT(rc == 0); 4644 CU_ASSERT(desc != NULL); 4645 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4646 io_ch = spdk_bdev_get_io_channel(desc); 4647 CU_ASSERT(io_ch != NULL); 4648 channel = spdk_io_channel_get_ctx(io_ch); 4649 4650 /* Lock range 20-29. */ 4651 g_lock_lba_range_done = false; 4652 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4653 CU_ASSERT(rc == 0); 4654 poll_threads(); 4655 4656 CU_ASSERT(g_lock_lba_range_done == true); 4657 range = TAILQ_FIRST(&channel->locked_ranges); 4658 SPDK_CU_ASSERT_FATAL(range != NULL); 4659 CU_ASSERT(range->offset == 20); 4660 CU_ASSERT(range->length == 10); 4661 4662 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 4663 * 20-29. 
4664 */ 4665 g_lock_lba_range_done = false; 4666 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 4667 CU_ASSERT(rc == 0); 4668 poll_threads(); 4669 4670 CU_ASSERT(g_lock_lba_range_done == false); 4671 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4672 SPDK_CU_ASSERT_FATAL(range != NULL); 4673 CU_ASSERT(range->offset == 25); 4674 CU_ASSERT(range->length == 15); 4675 4676 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 4677 * no longer overlaps with an active lock. 4678 */ 4679 g_unlock_lba_range_done = false; 4680 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4681 CU_ASSERT(rc == 0); 4682 poll_threads(); 4683 4684 CU_ASSERT(g_unlock_lba_range_done == true); 4685 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4686 range = TAILQ_FIRST(&channel->locked_ranges); 4687 SPDK_CU_ASSERT_FATAL(range != NULL); 4688 CU_ASSERT(range->offset == 25); 4689 CU_ASSERT(range->length == 15); 4690 4691 /* Lock 40-59. This should immediately lock since it does not overlap with the 4692 * currently active 25-39 lock. 4693 */ 4694 g_lock_lba_range_done = false; 4695 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 4696 CU_ASSERT(rc == 0); 4697 poll_threads(); 4698 4699 CU_ASSERT(g_lock_lba_range_done == true); 4700 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4701 SPDK_CU_ASSERT_FATAL(range != NULL); 4702 range = TAILQ_NEXT(range, tailq); 4703 SPDK_CU_ASSERT_FATAL(range != NULL); 4704 CU_ASSERT(range->offset == 40); 4705 CU_ASSERT(range->length == 20); 4706 4707 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 4708 g_lock_lba_range_done = false; 4709 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 4710 CU_ASSERT(rc == 0); 4711 poll_threads(); 4712 4713 CU_ASSERT(g_lock_lba_range_done == false); 4714 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4715 SPDK_CU_ASSERT_FATAL(range != NULL); 4716 CU_ASSERT(range->offset == 35); 4717 CU_ASSERT(range->length == 10); 4718 4719 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 4720 * the 40-59 lock is still active. 4721 */ 4722 g_unlock_lba_range_done = false; 4723 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 4724 CU_ASSERT(rc == 0); 4725 poll_threads(); 4726 4727 CU_ASSERT(g_unlock_lba_range_done == true); 4728 CU_ASSERT(g_lock_lba_range_done == false); 4729 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 4730 SPDK_CU_ASSERT_FATAL(range != NULL); 4731 CU_ASSERT(range->offset == 35); 4732 CU_ASSERT(range->length == 10); 4733 4734 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 4735 * no longer any active overlapping locks. 4736 */ 4737 g_unlock_lba_range_done = false; 4738 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 4739 CU_ASSERT(rc == 0); 4740 poll_threads(); 4741 4742 CU_ASSERT(g_unlock_lba_range_done == true); 4743 CU_ASSERT(g_lock_lba_range_done == true); 4744 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 4745 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 4746 SPDK_CU_ASSERT_FATAL(range != NULL); 4747 CU_ASSERT(range->offset == 35); 4748 CU_ASSERT(range->length == 10); 4749 4750 /* Finally, unlock 35-44. 
	g_unlock_lba_range_done = false;
	rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1);
	CU_ASSERT(rc == 0);
	poll_threads();

	CU_ASSERT(g_unlock_lba_range_done == true);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges));

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_abort_done = true;
	g_abort_status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
bdev_io_abort(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *channel;
	struct spdk_bdev_mgmt_channel *mgmt_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
	uint64_t io_ctx1 = 0, io_ctx2 = 0, i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 7;
	bdev_opts.bdev_io_cache_size = 2;

	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);
	channel = spdk_io_channel_get_ctx(io_ch);
	mgmt_ch = channel->shared_resource->mgmt_ch;

	g_abort_done = false;

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true);

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED);

	/* Test the case that the target I/O was successfully aborted. */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
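
	/* In the stub, g_io_exp_status controls whether an abort request is treated as
	 * having found its target: SUCCESS lets the abort complete the target I/O as
	 * FAILED, while FAILED (used below) models an abort that loses the race with
	 * the target I/O's normal completion.
	 */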

	/* Test the case that the target I/O was not aborted because it completed
	 * in the middle of execution of the abort.
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_abort_done = false;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;

	/* Test that a single-vector command which is split is aborted correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	g_io_done = false;

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(2);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Test that a multi-vector command that needs to be split by stripe and then
	 * needs to be split further is aborted correctly. Abort is requested before the
	 * second child I/O is submitted. The parent I/O should complete with failure
	 * without submitting the second child I/O.
	 */
	for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
		iov[i].iov_base = (void *)((i + 1) * 0x10000);
		iov[i].iov_len = 512;
	}

	bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
	g_io_done = false;
	rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
				    BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
	stub_complete_io(1);
	CU_ASSERT(g_abort_done == true);
	CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);

	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
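
	/* With bdev_io_pool_size = 7, the 50-block read below (boundary 16) splits into
	 * 4 children (blocks 14-15, 16-31, 32-47, 48-63), so the parent plus its
	 * children consume 5 spdk_bdev_io. The abort then needs its own parent bdev_io
	 * plus one child abort per outstanding child; the pool runs dry, and the
	 * remaining child aborts wait on mgmt_ch->io_wait_queue and are submitted
	 * sequentially.
	 */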
4931 */ 4932 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 4933 CU_ASSERT(rc == 0); 4934 CU_ASSERT(g_io_done == false); 4935 4936 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4937 4938 g_abort_done = false; 4939 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4940 4941 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 4942 CU_ASSERT(rc == 0); 4943 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 4944 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 4945 4946 stub_complete_io(1); 4947 CU_ASSERT(g_io_done == true); 4948 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 4949 stub_complete_io(3); 4950 CU_ASSERT(g_abort_done == true); 4951 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4952 4953 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4954 4955 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 4956 4957 spdk_put_io_channel(io_ch); 4958 spdk_bdev_close(desc); 4959 free_bdev(bdev); 4960 spdk_bdev_finish(bdev_fini_cb, NULL); 4961 poll_threads(); 4962 } 4963 4964 static void 4965 bdev_unmap(void) 4966 { 4967 struct spdk_bdev *bdev; 4968 struct spdk_bdev_desc *desc = NULL; 4969 struct spdk_io_channel *ioch; 4970 struct spdk_bdev_channel *bdev_ch; 4971 struct ut_expected_io *expected_io; 4972 struct spdk_bdev_opts bdev_opts = {}; 4973 uint32_t i, num_outstanding; 4974 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 4975 int rc; 4976 4977 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 4978 bdev_opts.bdev_io_pool_size = 512; 4979 bdev_opts.bdev_io_cache_size = 64; 4980 rc = spdk_bdev_set_opts(&bdev_opts); 4981 CU_ASSERT(rc == 0); 4982 4983 spdk_bdev_initialize(bdev_init_cb, NULL); 4984 bdev = allocate_bdev("bdev"); 4985 4986 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4987 CU_ASSERT_EQUAL(rc, 0); 4988 SPDK_CU_ASSERT_FATAL(desc != NULL); 4989 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4990 ioch = spdk_bdev_get_io_channel(desc); 4991 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4992 bdev_ch = spdk_io_channel_get_ctx(ioch); 4993 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4994 4995 fn_table.submit_request = stub_submit_request; 4996 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4997 4998 /* Case 1: First test the request won't be split */ 4999 num_blocks = 32; 5000 5001 g_io_done = false; 5002 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5003 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5004 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5005 CU_ASSERT_EQUAL(rc, 0); 5006 CU_ASSERT(g_io_done == false); 5007 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5008 stub_complete_io(1); 5009 CU_ASSERT(g_io_done == true); 5010 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5011 5012 /* Case 2: Test the split with 2 children requests */ 5013 bdev->max_unmap = 8; 5014 bdev->max_unmap_segments = 2; 5015 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5016 num_blocks = max_unmap_blocks * 2; 5017 offset = 0; 5018 5019 g_io_done = false; 5020 for (i = 0; i < 2; i++) { 5021 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5022 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5023 offset += max_unmap_blocks; 5024 } 5025 5026 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5027 CU_ASSERT_EQUAL(rc, 0); 5028 CU_ASSERT(g_io_done == false); 5029 

	/* Case 3: Test the split with 15 children requests, will finish 8 requests first */
	num_children = 15;
	num_blocks = max_unmap_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_unmap_blocks;
	}

	rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
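
/* bdev_write_zeroes_split_test mirrors bdev_unmap above: the per-child cap is
 * bdev->max_write_zeroes instead of max_unmap * max_unmap_segments, and the same
 * 8-children-at-a-time batching applies.
 */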

static void
bdev_write_zeroes_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request won't be split */
	num_blocks = 32;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 children requests */
	max_write_zeroes_blocks = 8;
	bdev->max_write_zeroes = max_write_zeroes_blocks;
	num_blocks = max_write_zeroes_blocks * 2;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < 2; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset,
						   max_write_zeroes_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 children requests, will finish 8 requests first */
	num_children = 15;
	num_blocks = max_write_zeroes_blocks * num_children;
	g_io_done = false;
	offset = 0;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset,
						   max_write_zeroes_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_write_zeroes_blocks;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_set_options_test(void)
{
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	/* Case 1: Do not set opts_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	bdev_opts.small_buf_pool_size = 4;

	/* Case 2: Do not set valid small_buf_pool_size and large_buf_pool_size */
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 3: Do not set valid large_buf_pool_size */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE - 1;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == -1);

	/* Case 4: Set valid large_buf_pool_size */
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	/* Case 5: Set different valid values for the small and large buf pools */
	bdev_opts.small_buf_pool_size = BUF_SMALL_POOL_SIZE + 3;
	bdev_opts.large_buf_pool_size = BUF_LARGE_POOL_SIZE + 3;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);
}

static uint64_t
get_ns_time(void)
{
	int rc;
	struct timespec ts;

	rc = clock_gettime(CLOCK_MONOTONIC, &ts);
	CU_ASSERT(rc == 0);
	return ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
}

static int
rb_tree_get_height(struct spdk_bdev_name *bdev_name)
{
	int h1, h2;

	if (bdev_name == NULL) {
		return -1;
	} else {
		h1 = rb_tree_get_height(RB_LEFT(bdev_name, node));
		h2 = rb_tree_get_height(RB_RIGHT(bdev_name, node));

		return spdk_max(h1, h2) + 1;
	}
}
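
/* rb_tree_get_height measures height in edges: an empty subtree is -1 and a
 * single node is 0. bdev_multi_allocation below uses it to check that the bdev
 * name tree stays balanced while thousands of bdevs are registered and looked up.
 */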

static void
bdev_multi_allocation(void)
{
	const int max_bdev_num = 1024 * 16;
	char name[max_bdev_num][16];
	char noexist_name[] = "invalid_bdev";
	struct spdk_bdev *bdev[max_bdev_num];
	int i, j;
	uint64_t last_time;
	int bdev_num;
	int height;

	for (j = 0; j < max_bdev_num; j++) {
		snprintf(name[j], sizeof(name[j]), "bdev%d", j);
	}

	for (i = 0; i < 16; i++) {
		last_time = get_ns_time();
		bdev_num = 1024 * (i + 1);
		for (j = 0; j < bdev_num; j++) {
			bdev[j] = allocate_bdev(name[j]);
			height = rb_tree_get_height(&bdev[j]->internal.bdev_name);
			CU_ASSERT(height <= (int)(spdk_u32log2(2 * j + 2)));
		}
		SPDK_NOTICELOG("alloc bdev num %d takes %" PRIu64 " ms\n", bdev_num,
			       (get_ns_time() - last_time) / 1000 / 1000);
		for (j = 0; j < bdev_num; j++) {
			CU_ASSERT(spdk_bdev_get_by_name(name[j]) != NULL);
		}
		CU_ASSERT(spdk_bdev_get_by_name(noexist_name) == NULL);

		for (j = 0; j < bdev_num; j++) {
			free_bdev(bdev[j]);
		}
		for (j = 0; j < bdev_num; j++) {
			CU_ASSERT(spdk_bdev_get_by_name(name[j]) == NULL);
		}
	}
}

static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *)0xf00df00d;

static int
test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains,
		int array_size)
{
	if (array_size > 0 && domains) {
		domains[0] = g_bdev_memory_domain;
	}

	return 1;
}

static void
bdev_get_memory_domains(void)
{
	struct spdk_bdev_fn_table fn_table = {
		.get_memory_domains = test_bdev_get_supported_dma_device_types_op
	};
	struct spdk_bdev bdev = { .fn_table = &fn_table };
	struct spdk_memory_domain *domains[2] = {};
	int rc;

	/* bdev is NULL */
	rc = spdk_bdev_get_memory_domains(NULL, domains, 2);
	CU_ASSERT(rc == -EINVAL);

	/* domains is NULL */
	rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2);
	CU_ASSERT(rc == 1);

	/* array size is 0 */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 0);
	CU_ASSERT(rc == 1);

	/* get_memory_domains op is set */
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 1);
	CU_ASSERT(domains[0] == g_bdev_memory_domain);

	/* get_memory_domains op is not set */
	fn_table.get_memory_domains = NULL;
	rc = spdk_bdev_get_memory_domains(&bdev, domains, 2);
	CU_ASSERT(rc == 0);
}
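
/* _bdev_io_ext drives spdk_bdev_readv_blocks_ext()/spdk_bdev_writev_blocks_ext()
 * through the stub bdev. When ext_io_opts is non-NULL, the expected I/O also
 * carries the metadata pointer and the opts so the stub can verify they were
 * passed through unchanged; with NULL opts the same paths must still work.
 */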

static void
_bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* read */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	if (ext_io_opts) {
		expected_io->md_buf = ext_io_opts->metadata;
		expected_io->ext_io_opts = ext_io_opts;
	}
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	if (ext_io_opts) {
		expected_io->md_buf = ext_io_opts->metadata;
		expected_io->ext_io_opts = ext_io_opts;
	}
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_io_ext(void)
{
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts)
	};

	_bdev_io_ext(&ext_io_opts);
}

static void
bdev_io_ext_no_opts(void)
{
	_bdev_io_ext(NULL);
}

static void
bdev_io_ext_invalid_opts(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts)
	};
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Test invalid ext_opts size: zero */
	ext_io_opts.size = 0;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);

	/* Larger than the structure itself */
	ext_io_opts.size = sizeof(ext_io_opts) * 2;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);

	/* Too small to fully contain the metadata member */
	ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) +
			   sizeof(ext_io_opts.metadata) - 1;
	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);
	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == -EINVAL);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
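
/* bdev_io_ext_split submits an 8-block ext I/O that crosses the optimal I/O
 * boundary at block 16. The separate metadata buffer is split along with the
 * data: the second child starts 2 blocks in, so its md_buf is
 * metadata + 2 * md_len = metadata + 2 * 8 bytes.
 */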

static void
bdev_io_ext_split(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts)
	};
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Check that an IO request with ext_opts and metadata is split correctly
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 */
	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

	iov.iov_base = (void *)0xF000;
	iov.iov_len = 4096;
	memset(&ext_io_opts, 0, sizeof(ext_io_opts));
	ext_io_opts.metadata = (void *)0xFF000000;
	ext_io_opts.size = sizeof(ext_io_opts);
	g_io_done = false;

	/* read */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	expected_io->copy_opts = true;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	expected_io->ext_io_opts = &ext_io_opts;
	expected_io->copy_opts = true;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
	expected_io->md_buf = ext_io_opts.metadata;
	expected_io->ext_io_opts = &ext_io_opts;
	expected_io->copy_opts = true;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1);
	expected_io->md_buf = ext_io_opts.metadata + 2 * 8;
	expected_io->ext_io_opts = &ext_io_opts;
	expected_io->copy_opts = true;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}
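
/* In bdev_io_ext_bounce_buffer the caller supplies a memory domain, but the stub
 * bdev does not advertise memory-domain support. The bdev layer therefore pulls
 * the data into a local bounce buffer before a write reaches the module, and
 * pushes read data back to the caller's domain on completion; the
 * g_memory_domain_pull/push_data flags record that those callbacks ran.
 */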

static void
bdev_io_ext_bounce_buffer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	char io_buf[512];
	struct iovec iov = { .iov_base = io_buf, .iov_len = 512 };
	struct ut_expected_io *expected_io;
	struct spdk_bdev_ext_io_opts ext_io_opts = {
		.metadata = (void *)0xFF000000,
		.size = sizeof(ext_io_opts)
	};
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);

	bdev = allocate_bdev("bdev0");
	bdev->md_interleave = false;
	bdev->md_len = 8;

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* Verify data pull/push:
	 * the bdev doesn't support memory domains, so buffers from the bdev memory
	 * pool will be used.
	 */
	ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef;

	/* read */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	expected_io->ext_io_opts = &ext_io_opts;
	expected_io->copy_opts = true;
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_memory_domain_push_data_called == true);
	CU_ASSERT(g_io_done == true);

	/* write */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1);
	ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len);
	expected_io->ext_io_opts = &ext_io_opts;
	expected_io->copy_opts = true;
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts);

	CU_ASSERT(rc == 0);
	CU_ASSERT(g_memory_domain_pull_data_called == true);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_register_uuid_alias(void)
{
	struct spdk_bdev *bdev, *second;
	char uuid[SPDK_UUID_STRING_LEN];
	int rc;

	spdk_bdev_initialize(bdev_init_cb, NULL);
	bdev = allocate_bdev("bdev0");

	/* Make sure a UUID was generated */
	CU_ASSERT_FALSE(spdk_mem_all_zero(&bdev->uuid, sizeof(bdev->uuid)));

	/* Check that a UUID alias was registered */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));
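
	/* Unregistering does not clear the bdev's UUID, so re-registering the same
	 * structure below exercises the path where a non-zero UUID is already set.
	 */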

	/* Check the same, but this time register the bdev with non-zero UUID */
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Register the bdev using the UUID as the name */
	bdev->name = uuid;
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);

	/* Unregister the bdev */
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid));

	/* Check that it's not possible to register two bdevs with the same UUID */
	bdev->name = "bdev0";
	second = allocate_bdev("bdev1");
	spdk_uuid_copy(&bdev->uuid, &second->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, -EEXIST);

	/* Regenerate the UUID and re-check */
	spdk_uuid_generate(&bdev->uuid);
	rc = spdk_bdev_register(bdev);
	CU_ASSERT_EQUAL(rc, 0);

	/* And check that both bdevs can be retrieved through their UUIDs */
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid);
	CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second);

	free_bdev(second);
	free_bdev(bdev);
	spdk_bdev_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static void
bdev_unregister_by_name(void)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = allocate_bdev("bdev");

	g_event_type1 = 0xFF;
	g_unregister_arg = NULL;
	g_unregister_rc = -1;

	/* Name does not match any registered bdev */
	rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	/* Name matches, but the bdev is owned by a different module */
	rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == -ENODEV);

	rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678);
	CU_ASSERT(rc == 0);

	/* Check that the unregister callback is delayed */
	CU_ASSERT(g_unregister_arg == NULL);
	CU_ASSERT(g_unregister_rc == -1);

	poll_threads();

	/* Event callback shall not be issued because the device was closed */
	CU_ASSERT(g_event_type1 == 0xFF);
	/* Unregister callback is issued */
	CU_ASSERT(g_unregister_arg == (void *)0x12345678);
	CU_ASSERT(g_unregister_rc == 0);

	free_bdev(bdev);
}

static int
count_bdevs(void *ctx, struct spdk_bdev *bdev)
{
	int *count = ctx;

	(*count)++;

	return 0;
}

static void
for_each_bdev_test(void)
{
	struct spdk_bdev *bdev[8];
	int rc, count;

	bdev[0] = allocate_bdev("bdev0");

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_bdev("bdev4");

	bdev[5] = allocate_bdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_bdev("bdev6");

	bdev[7] = allocate_bdev("bdev7");
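
	/* spdk_for_each_bdev() visits all 8 bdevs; spdk_for_each_bdev_leaf() skips the
	 * three claimed ones (bdev1, bdev3, bdev5), so only 5 are counted below.
	 */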
allocate_bdev("bdev6"); 5765 5766 bdev[7] = allocate_bdev("bdev7"); 5767 5768 count = 0; 5769 rc = spdk_for_each_bdev(&count, count_bdevs); 5770 CU_ASSERT(rc == 0); 5771 CU_ASSERT(count == 8); 5772 5773 count = 0; 5774 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 5775 CU_ASSERT(rc == 0); 5776 CU_ASSERT(count == 5); 5777 5778 free_bdev(bdev[0]); 5779 free_bdev(bdev[1]); 5780 free_bdev(bdev[2]); 5781 free_bdev(bdev[3]); 5782 free_bdev(bdev[4]); 5783 free_bdev(bdev[5]); 5784 free_bdev(bdev[6]); 5785 free_bdev(bdev[7]); 5786 } 5787 5788 static void 5789 bdev_seek_test(void) 5790 { 5791 struct spdk_bdev *bdev; 5792 struct spdk_bdev_desc *desc = NULL; 5793 struct spdk_io_channel *io_ch; 5794 int rc; 5795 5796 spdk_bdev_initialize(bdev_init_cb, NULL); 5797 poll_threads(); 5798 5799 bdev = allocate_bdev("bdev0"); 5800 5801 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5802 CU_ASSERT(rc == 0); 5803 poll_threads(); 5804 SPDK_CU_ASSERT_FATAL(desc != NULL); 5805 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5806 io_ch = spdk_bdev_get_io_channel(desc); 5807 CU_ASSERT(io_ch != NULL); 5808 5809 /* Seek data not supported */ 5810 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 5811 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 5812 CU_ASSERT(rc == 0); 5813 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5814 poll_threads(); 5815 CU_ASSERT(g_seek_offset == 0); 5816 5817 /* Seek hole not supported */ 5818 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 5819 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 5820 CU_ASSERT(rc == 0); 5821 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5822 poll_threads(); 5823 CU_ASSERT(g_seek_offset == UINT64_MAX); 5824 5825 /* Seek data supported */ 5826 g_seek_data_offset = 12345; 5827 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 5828 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 5829 CU_ASSERT(rc == 0); 5830 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5831 stub_complete_io(1); 5832 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5833 CU_ASSERT(g_seek_offset == 12345); 5834 5835 /* Seek hole supported */ 5836 g_seek_hole_offset = 67890; 5837 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 5838 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 5839 CU_ASSERT(rc == 0); 5840 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5841 stub_complete_io(1); 5842 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5843 CU_ASSERT(g_seek_offset == 67890); 5844 5845 spdk_put_io_channel(io_ch); 5846 spdk_bdev_close(desc); 5847 free_bdev(bdev); 5848 spdk_bdev_finish(bdev_fini_cb, NULL); 5849 poll_threads(); 5850 } 5851 5852 int 5853 main(int argc, char **argv) 5854 { 5855 CU_pSuite suite = NULL; 5856 unsigned int num_failures; 5857 5858 CU_set_error_action(CUEA_ABORT); 5859 CU_initialize_registry(); 5860 5861 suite = CU_add_suite("bdev", null_init, null_clean); 5862 5863 CU_ADD_TEST(suite, bytes_to_blocks_test); 5864 CU_ADD_TEST(suite, num_blocks_test); 5865 CU_ADD_TEST(suite, io_valid_test); 5866 CU_ADD_TEST(suite, open_write_test); 5867 CU_ADD_TEST(suite, claim_test); 5868 CU_ADD_TEST(suite, alias_add_del_test); 5869 CU_ADD_TEST(suite, get_device_stat_test); 5870 CU_ADD_TEST(suite, bdev_io_types_test); 5871 CU_ADD_TEST(suite, bdev_io_wait_test); 5872 CU_ADD_TEST(suite, bdev_io_spans_split_test); 5873 CU_ADD_TEST(suite, bdev_io_boundary_split_test); 5874 CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test); 5875 

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("bdev", null_init, null_clean);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_compare_emulated);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, bdev_set_qd_sampling);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_multi_allocation);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_io_ext);
	CU_ADD_TEST(suite, bdev_io_ext_no_opts);
	CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
	CU_ADD_TEST(suite, bdev_io_ext_split);
	CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);
	CU_ADD_TEST(suite, bdev_seek_test);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}