/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2017 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_internal/cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
	    "test_domain");
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *domain), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
DEFINE_STUB(spdk_accel_append_copy, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
	     void *src_domain_ctx, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);

static bool g_memory_domain_pull_data_called;
static bool g_memory_domain_push_data_called;
static int g_accel_io_device;

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_pull_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	g_memory_domain_push_data_called = true;
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_accel_io_device);
}

int g_status;
int g_count;
enum spdk_bdev_event_type g_event_type1;
enum spdk_bdev_event_type g_event_type2;
enum spdk_bdev_event_type g_event_type3;
enum spdk_bdev_event_type g_event_type4;
struct spdk_histogram_data *g_histogram;
void *g_unregister_arg;
int g_unregister_rc;

void
spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
			 int *sc, int *sk, int *asc, int *ascq)
{
}

static int
ut_accel_ch_create_cb(void *io_device, void *ctx)
{
	return 0;
}

static void
ut_accel_ch_destroy_cb(void *io_device, void *ctx)
{
}

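/*
 * The stubs above follow SPDK's usual unit-test mocking pattern: DEFINE_STUB()
 * pins a dependency to a fixed return value, while the hand-written functions
 * paired with DEFINE_RETURN_MOCK()/HANDLE_RETURN_MOCK() let an individual test
 * override the return value.  A minimal sketch (the error value is purely
 * illustrative, not taken from any test in this file):
 *
 *	MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM);
 *	... submit an I/O that requires a pull; expect it to fail ...
 *	MOCK_CLEAR(spdk_memory_domain_pull_data);
 */
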
static int
ut_bdev_setup(void)
{
	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
				ut_accel_ch_destroy_cb, 0, NULL);
	return 0;
}

static int
ut_bdev_teardown(void)
{
	spdk_io_device_unregister(&g_accel_io_device, NULL);

	return 0;
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

struct ut_expected_io {
	uint8_t				type;
	uint64_t			offset;
	uint64_t			src_offset;
	uint64_t			length;
	int				iovcnt;
	struct iovec			iov[SPDK_BDEV_IO_NUM_CHILD_IOV];
	void				*md_buf;
	TAILQ_ENTRY(ut_expected_io)	link;
};

struct bdev_ut_io {
	TAILQ_ENTRY(bdev_ut_io)		link;
};

struct bdev_ut_channel {
	TAILQ_HEAD(, bdev_ut_io)	outstanding_io;
	uint32_t			outstanding_io_count;
	TAILQ_HEAD(, ut_expected_io)	expected_io;
};

static bool g_io_done;
static struct spdk_bdev_io *g_bdev_io;
static enum spdk_bdev_io_status g_io_status;
static enum spdk_bdev_io_status g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
static uint32_t g_bdev_ut_io_device;
static struct bdev_ut_channel *g_bdev_ut_channel;
static void *g_compare_read_buf;
static uint32_t g_compare_read_buf_len;
static void *g_compare_write_buf;
static uint32_t g_compare_write_buf_len;
static void *g_compare_md_buf;
static bool g_abort_done;
static enum spdk_bdev_io_status g_abort_status;
static void *g_zcopy_read_buf;
static uint32_t g_zcopy_read_buf_len;
static void *g_zcopy_write_buf;
static uint32_t g_zcopy_write_buf_len;
static struct spdk_bdev_io *g_zcopy_bdev_io;
static uint64_t g_seek_data_offset;
static uint64_t g_seek_hole_offset;
static uint64_t g_seek_offset;

static struct ut_expected_io *
ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->length = length;
	expected_io->iovcnt = iovcnt;

	return expected_io;
}

static struct ut_expected_io *
ut_alloc_expected_copy_io(uint8_t type, uint64_t offset, uint64_t src_offset, uint64_t length)
{
	struct ut_expected_io *expected_io;

	expected_io = calloc(1, sizeof(*expected_io));
	SPDK_CU_ASSERT_FATAL(expected_io != NULL);

	expected_io->type = type;
	expected_io->offset = offset;
	expected_io->src_offset = src_offset;
	expected_io->length = length;

	return expected_io;
}

static void
ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
{
	expected_io->iov[pos].iov_base = base;
	expected_io->iov[pos].iov_len = len;
}

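/*
 * Tests drive the stub backend below by queueing ut_expected_io entries that
 * describe each child I/O they expect to see.  A typical setup sketch (the
 * offset/length/address values are illustrative only):
 *
 *	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
 *	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
 *	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
 *
 * stub_submit_request() then pops one entry per submitted I/O and asserts that
 * the type, offset, length and iovecs all match.
 */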
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_expected_io *expected_io;
	struct iovec *iov, *expected_iov;
	struct spdk_bdev_io *bio_to_abort;
	struct bdev_ut_io *bio;
	int i;

	g_bdev_io = bdev_io;

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		memcpy(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len);
		if (bdev_io->bdev->md_len && bdev_io->u.bdev.md_buf && g_compare_md_buf) {
			memcpy(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			       bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks);
		}
	}

	if (g_compare_write_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_write_buf_len == len);
		memcpy(g_compare_write_buf, bdev_io->u.bdev.iovs[0].iov_base, len);
	}

	if (g_compare_read_buf && bdev_io->type == SPDK_BDEV_IO_TYPE_COMPARE) {
		uint32_t len = bdev_io->u.bdev.iovs[0].iov_len;

		CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
		CU_ASSERT(g_compare_read_buf_len == len);
		if (memcmp(bdev_io->u.bdev.iovs[0].iov_base, g_compare_read_buf, len)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
		if (bdev_io->u.bdev.md_buf &&
		    memcmp(bdev_io->u.bdev.md_buf, g_compare_md_buf,
			   bdev_io->bdev->md_len * bdev_io->u.bdev.num_blocks)) {
			g_io_exp_status = SPDK_BDEV_IO_STATUS_MISCOMPARE;
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		if (g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS) {
			TAILQ_FOREACH(bio, &ch->outstanding_io, link) {
				bio_to_abort = spdk_bdev_io_from_ctx(bio);
				if (bio_to_abort == bdev_io->u.abort.bio_to_abort) {
					TAILQ_REMOVE(&ch->outstanding_io, bio, link);
					ch->outstanding_io_count--;
					spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) {
		if (bdev_io->u.bdev.zcopy.start) {
			g_zcopy_bdev_io = bdev_io;
			if (bdev_io->u.bdev.zcopy.populate) {
				/* Start of a read */
				CU_ASSERT(g_zcopy_read_buf != NULL);
				CU_ASSERT(g_zcopy_read_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_read_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_read_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			} else {
				/* Start of a write */
				CU_ASSERT(g_zcopy_write_buf != NULL);
				CU_ASSERT(g_zcopy_write_buf_len > 0);
				bdev_io->u.bdev.iovs[0].iov_base = g_zcopy_write_buf;
				bdev_io->u.bdev.iovs[0].iov_len = g_zcopy_write_buf_len;
				bdev_io->u.bdev.iovcnt = 1;
			}
		} else {
			if (bdev_io->u.bdev.zcopy.commit) {
				/* End of write */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_write_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_write_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_write_buf = NULL;
				g_zcopy_write_buf_len = 0;
			} else {
				/* End of read */
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_base == g_zcopy_read_buf);
				CU_ASSERT(bdev_io->u.bdev.iovs[0].iov_len == g_zcopy_read_buf_len);
				CU_ASSERT(bdev_io->u.bdev.iovcnt == 1);
				g_zcopy_read_buf = NULL;
				g_zcopy_read_buf_len = 0;
			}
		}
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_DATA) {
		bdev_io->u.bdev.seek.offset = g_seek_data_offset;
	}

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_SEEK_HOLE) {
		bdev_io->u.bdev.seek.offset = g_seek_hole_offset;
	}

	TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct bdev_ut_io *)bdev_io->driver_ctx, link);
	ch->outstanding_io_count++;

	expected_io = TAILQ_FIRST(&ch->expected_io);
	if (expected_io == NULL) {
		return;
	}
	TAILQ_REMOVE(&ch->expected_io, expected_io, link);

	if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
		CU_ASSERT(bdev_io->type == expected_io->type);
	}

	if (expected_io->md_buf != NULL) {
		CU_ASSERT(expected_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	if (expected_io->length == 0) {
		free(expected_io);
		return;
	}

	CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
	CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
	if (expected_io->type == SPDK_BDEV_IO_TYPE_COPY) {
		CU_ASSERT(expected_io->src_offset == bdev_io->u.bdev.copy.src_offset_blocks);
	}

	if (expected_io->iovcnt == 0) {
		free(expected_io);
		/* UNMAP, WRITE_ZEROES, FLUSH and COPY don't have iovs, so we can just return now. */
		return;
	}

	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.f.has_bounce_buf == false) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = bdev_io->internal.bounce_buf.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
	}

	free(expected_io);
}

static void
stub_submit_request_get_buf_cb(struct spdk_io_channel *_ch,
			       struct spdk_bdev_io *bdev_io, bool success)
{
	CU_ASSERT(success == true);

	stub_submit_request(_ch, bdev_io);
}

static void
stub_submit_request_get_buf(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	spdk_bdev_io_get_buf(bdev_io, stub_submit_request_get_buf_cb,
			     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
}

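/*
 * stub_complete_io() completes outstanding I/O in FIFO order with the status
 * taken from g_io_exp_status.  The usual test rhythm is a sketch like:
 *
 *	rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL);
 *	CU_ASSERT(rc == 0);
 *	stub_complete_io(1);
 *	CU_ASSERT(g_io_done == true);
 */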
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct bdev_ut_channel *ch = g_bdev_ut_channel;
	struct bdev_ut_io *bio;
	struct spdk_bdev_io *bdev_io;
	static enum spdk_bdev_io_status io_status;
	uint32_t num_completed = 0;

	while (num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bio = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bio, link);
		bdev_io = spdk_bdev_io_from_ctx(bio);
		ch->outstanding_io_count--;
		io_status = g_io_exp_status == SPDK_BDEV_IO_STATUS_SUCCESS ? SPDK_BDEV_IO_STATUS_SUCCESS :
			    g_io_exp_status;
		spdk_bdev_io_complete(bdev_io, io_status);
		num_completed++;
	}

	return num_completed;
}

static struct spdk_io_channel *
bdev_ut_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev_ut_io_device);
}

static bool g_io_types_supported[SPDK_BDEV_NUM_IO_TYPES] = {
	[SPDK_BDEV_IO_TYPE_READ]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE]		= true,
	[SPDK_BDEV_IO_TYPE_COMPARE]		= true,
	[SPDK_BDEV_IO_TYPE_UNMAP]		= true,
	[SPDK_BDEV_IO_TYPE_FLUSH]		= true,
	[SPDK_BDEV_IO_TYPE_RESET]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_ADMIN]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO]		= true,
	[SPDK_BDEV_IO_TYPE_NVME_IO_MD]		= true,
	[SPDK_BDEV_IO_TYPE_WRITE_ZEROES]	= true,
	[SPDK_BDEV_IO_TYPE_ZCOPY]		= true,
	[SPDK_BDEV_IO_TYPE_ABORT]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_HOLE]		= true,
	[SPDK_BDEV_IO_TYPE_SEEK_DATA]		= true,
	[SPDK_BDEV_IO_TYPE_COPY]		= true,
};

static void
ut_enable_io_type(enum spdk_bdev_io_type io_type, bool enable)
{
	g_io_types_supported[io_type] = enable;
}

static bool
stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
{
	return g_io_types_supported[io_type];
}

static struct spdk_bdev_fn_table fn_table = {
	.destruct = stub_destruct,
	.submit_request = stub_submit_request,
	.get_io_channel = bdev_ut_get_io_channel,
	.io_type_supported = stub_io_type_supported,
};

static int
bdev_ut_create_ch(void *io_device, void *ctx_buf)
{
	struct bdev_ut_channel *ch = ctx_buf;

	CU_ASSERT(g_bdev_ut_channel == NULL);
	g_bdev_ut_channel = ch;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_io_count = 0;
	TAILQ_INIT(&ch->expected_io);
	return 0;
}

static void
bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
{
	CU_ASSERT(g_bdev_ut_channel != NULL);
	g_bdev_ut_channel = NULL;
}

struct spdk_bdev_module bdev_ut_if;

static int
bdev_ut_module_init(void)
{
	spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
				sizeof(struct bdev_ut_channel), NULL);
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
bdev_ut_module_fini(void)
{
	spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = bdev_ut_module_init,
	.module_fini = bdev_ut_module_fini,
	.async_init = true,
};

static void vbdev_ut_examine_config(struct spdk_bdev *bdev);
static void vbdev_ut_examine_disk(struct spdk_bdev *bdev);

static int
vbdev_ut_module_init(void)
{
	return 0;
}

static void
vbdev_ut_module_fini(void)
{
}

static int
vbdev_ut_get_ctx_size(void)
{
	return sizeof(struct bdev_ut_io);
}

struct spdk_bdev_module vbdev_ut_if = {
	.name = "vbdev_ut",
	.module_init = vbdev_ut_module_init,
	.module_fini = vbdev_ut_module_fini,
	.examine_config = vbdev_ut_examine_config,
	.examine_disk = vbdev_ut_examine_disk,
	.get_ctx_size = vbdev_ut_get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
SPDK_BDEV_MODULE_REGISTER(vbdev_ut, &vbdev_ut_if)

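/*
 * A test that wants to observe the examine callbacks can point a bdev's ctxt
 * at a ut_examine_ctx and register it through allocate_bdev_ctx() (defined
 * further below).  A sketch, assuming registration triggers exactly one
 * examine pass:
 *
 *	struct ut_examine_ctx ctx = {};
 *	struct spdk_bdev *bdev = allocate_bdev_ctx("bdev0", &ctx);
 *	CU_ASSERT(ctx.examine_config_count == 1);
 */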
struct ut_examine_ctx {
	void (*examine_config)(struct spdk_bdev *bdev);
	void (*examine_disk)(struct spdk_bdev *bdev);
	uint32_t examine_config_count;
	uint32_t examine_disk_count;
};

static void
vbdev_ut_examine_config(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_config_count++;
		if (ctx->examine_config != NULL) {
			ctx->examine_config(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static void
vbdev_ut_examine_disk(struct spdk_bdev *bdev)
{
	struct ut_examine_ctx *ctx = bdev->ctxt;

	if (ctx != NULL) {
		ctx->examine_disk_count++;
		if (ctx->examine_disk != NULL) {
			ctx->examine_disk(bdev);
		}
	}

	spdk_bdev_module_examine_done(&vbdev_ut_if);
}

static void
bdev_init_cb(void *arg, int rc)
{
	CU_ASSERT(rc == 0);
}

static void
bdev_fini_cb(void *arg)
{
}

static void
ut_init_bdev(struct spdk_bdev_opts *opts)
{
	int rc;

	if (opts != NULL) {
		rc = spdk_bdev_set_opts(opts);
		CU_ASSERT(rc == 0);
	}
	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, NULL);
	poll_threads();
}

static void
ut_fini_bdev(void)
{
	spdk_bdev_finish(bdev_fini_cb, NULL);
	spdk_iobuf_finish(bdev_fini_cb, NULL);
	poll_threads();
}

static struct spdk_bdev *
allocate_bdev_ctx(char *name, void *ctx)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->ctxt = ctx;
	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &bdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	spdk_uuid_generate(&bdev->uuid);

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static struct spdk_bdev *
allocate_bdev(char *name)
{
	return allocate_bdev_ctx(name, NULL);
}

static struct spdk_bdev *
allocate_vbdev(char *name)
{
	struct spdk_bdev *bdev;
	int rc;

	bdev = calloc(1, sizeof(*bdev));
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	bdev->name = name;
	bdev->fn_table = &fn_table;
	bdev->module = &vbdev_ut_if;
	bdev->blockcnt = 1024;
	bdev->blocklen = 512;

	rc = spdk_bdev_register(bdev);
	poll_threads();
	CU_ASSERT(rc == 0);

	return bdev;
}

static void
free_bdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
free_vbdev(struct spdk_bdev *bdev)
{
	spdk_bdev_unregister(bdev, NULL, NULL);
	poll_threads();
	memset(bdev, 0xFF, sizeof(*bdev));
	free(bdev);
}

static void
get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
{
	const char *bdev_name;

	CU_ASSERT(bdev != NULL);
	CU_ASSERT(rc == 0);
	bdev_name = spdk_bdev_get_name(bdev);
	CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");

	free(stat);

	*(bool *)cb_arg = true;
}

static void
bdev_unregister_cb(void *cb_arg, int rc)
{
	g_unregister_arg = cb_arg;
	g_unregister_rc = rc;
}

static void
bdev_ut_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
}

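/*
 * The open callbacks below receive &desc as event_ctx and read it through a
 * double pointer: the descriptor is not yet assigned at the time
 * spdk_bdev_open_ext() captures event_ctx, so the callback must dereference
 * it lazily.  On SPDK_BDEV_EVENT_REMOVE the first two callbacks close the
 * descriptor; the last two only record the event type.
 */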
static void
bdev_open_cb1(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type1 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb2(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	struct spdk_bdev_desc *desc = *(struct spdk_bdev_desc **)event_ctx;

	g_event_type2 = type;
	if (SPDK_BDEV_EVENT_REMOVE == type) {
		spdk_bdev_close(desc);
	}
}

static void
bdev_open_cb3(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type3 = type;
}

static void
bdev_open_cb4(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *event_ctx)
{
	g_event_type4 = type;
}

static void
bdev_seek_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_seek_offset = spdk_bdev_io_get_seek_offset(bdev_io);
	spdk_bdev_free_io(bdev_io);
}

static void
get_device_stat_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_io_stat *stat;
	bool done;

	bdev = allocate_bdev("bdev0");
	stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
	if (stat == NULL) {
		free_bdev(bdev);
		return;
	}

	done = false;
	spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, &done);
	while (!done) { poll_threads(); }

	free_bdev(bdev);
}

static void
open_write_test(void)
{
	struct spdk_bdev *bdev[9];
	struct spdk_bdev_desc *desc[9] = {};
	int rc;

	ut_init_bdev(NULL);

	/*
	 * Create a tree of bdevs to test various open w/ write cases.
	 *
	 * bdev0 through bdev3 are physical block devices, such as NVMe
	 * namespaces or Ceph block devices.
	 *
	 * bdev4 is a virtual bdev with multiple base bdevs.  This models
	 * caching or RAID use cases.
	 *
	 * bdev5 through bdev7 are all virtual bdevs with the same base
	 * bdev (except bdev7).  This models partitioning or logical volume
	 * use cases.
	 *
	 * bdev7 is a virtual bdev with multiple base bdevs.  One of base bdevs
	 * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6.  This
	 * models caching, RAID, partitioning or logical volumes use cases.
	 *
	 * bdev8 is a virtual bdev with multiple base bdevs, but these
	 * base bdevs are themselves virtual bdevs.
	 *
	 *                bdev8
	 *                  |
	 *            +----------+
	 *            |          |
	 *          bdev4      bdev5   bdev6   bdev7
	 *            |          |       |       |
	 *        +---+---+   +---+   +   +---+---+
	 *        |       |    \ |   /         \
	 *      bdev0   bdev1  bdev2          bdev3
	 */

	bdev[0] = allocate_bdev("bdev0");
	rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[1] = allocate_bdev("bdev1");
	rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[2] = allocate_bdev("bdev2");
	rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[3] = allocate_bdev("bdev3");
	rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[4] = allocate_vbdev("bdev4");
	rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[5] = allocate_vbdev("bdev5");
	rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);

	bdev[6] = allocate_vbdev("bdev6");

	bdev[7] = allocate_vbdev("bdev7");

	bdev[8] = allocate_vbdev("bdev8");

	/* Open bdev0 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc[0]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
	CU_ASSERT(bdev[0] == spdk_bdev_desc_get_bdev(desc[0]));
	spdk_bdev_close(desc[0]);

	/*
	 * Open bdev1 read/write.  This should fail since bdev1 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev1", true, bdev_ut_event_cb, NULL, &desc[1]);
	CU_ASSERT(rc == -EPERM);

	/*
	 * Open bdev4 read/write.  This should fail since bdev4 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev4", true, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev4 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev4", false, bdev_ut_event_cb, NULL, &desc[4]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
	CU_ASSERT(bdev[4] == spdk_bdev_desc_get_bdev(desc[4]));
	spdk_bdev_close(desc[4]);

	/*
	 * Open bdev8 read/write.  This should succeed since it has not been
	 * claimed by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev8", true, bdev_ut_event_cb, NULL, &desc[8]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
	CU_ASSERT(bdev[8] == spdk_bdev_desc_get_bdev(desc[8]));
	spdk_bdev_close(desc[8]);

	/*
	 * Open bdev5 read/write.  This should fail since bdev5 has been claimed
	 * by a vbdev module.
	 */
	rc = spdk_bdev_open_ext("bdev5", true, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == -EPERM);

	/* Open bdev5 read-only.  This should succeed. */
	rc = spdk_bdev_open_ext("bdev5", false, bdev_ut_event_cb, NULL, &desc[5]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
	CU_ASSERT(bdev[5] == spdk_bdev_desc_get_bdev(desc[5]));
	spdk_bdev_close(desc[5]);

	free_vbdev(bdev[8]);

	free_vbdev(bdev[5]);
	free_vbdev(bdev[6]);
	free_vbdev(bdev[7]);

	free_vbdev(bdev[4]);

	free_bdev(bdev[0]);
	free_bdev(bdev[1]);
	free_bdev(bdev[2]);
	free_bdev(bdev[3]);

	ut_fini_bdev();
}

static void
claim_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc, *open_desc;
	int rc;
	uint32_t count;

	ut_init_bdev(NULL);

	/*
	 * A vbdev that uses a read-only bdev may need it to remain read-only.
	 * To do so, it opens the bdev read-only, then claims it without
	 * passing a spdk_bdev_desc.
	 */
	bdev = allocate_bdev("bdev0");
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc->write == false);

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should still be ro */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(!open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	/* A read-only bdev is upgraded to read-write if desc is passed. */
	spdk_bdev_module_release_bdev(bdev);
	rc = spdk_bdev_module_claim_bdev(bdev, desc, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &bdev_ut_if);

	/* There should be only one open descriptor and it should be rw */
	count = 0;
	TAILQ_FOREACH(open_desc, &bdev->internal.open_descs, link) {
		CU_ASSERT(open_desc == desc);
		CU_ASSERT(open_desc->write);
		count++;
	}
	CU_ASSERT(count == 1);

	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
bytes_to_blocks_test(void)
{
	struct spdk_bdev bdev;
	uint64_t offset_blocks, num_blocks;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;

	/* All parameters valid */
	offset_blocks = 0;
	num_blocks = 0;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);

	/* blocklen is not a power of two, so the conversion must divide instead of shift */
	bdev.blocklen = 100;
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 200, &num_blocks) == 0);
	CU_ASSERT(offset_blocks == 1);
	CU_ASSERT(num_blocks == 2);

	/* Offset not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 100, &num_blocks) != 0);

	/* Length not a block multiple */
	CU_ASSERT(bdev_bytes_to_blocks(&bdev, 100, &offset_blocks, 3, &num_blocks) != 0);
}

static void
num_blocks_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("num_blocks");

	spdk_bdev_notify_blockcnt_change(bdev, 50);

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 70) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 30) == 0);

	rc = spdk_bdev_open_ext("num_blocks", false, bdev_open_cb1, &desc, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));

	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 80) == 0);
	/* Shrinking block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 20) != 0);

	g_event_type1 = 0xFF;
	/* Growing block number */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 90) == 0);

	poll_threads();
	CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_RESIZE);

	g_event_type1 = 0xFF;
	/* Growing block number and closing */
	CU_ASSERT(spdk_bdev_notify_blockcnt_change(bdev, 100) == 0);

	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();

	poll_threads();

	/* Callback is not called for closed device */
	CU_ASSERT_EQUAL(g_event_type1, 0xFF);
}

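/*
 * num_blocks_test() above exercises two resize rules: shrinking blockcnt only
 * succeeds while no descriptor is open, and SPDK_BDEV_EVENT_RESIZE is
 * delivered asynchronously, which is why the test polls the threads before
 * checking g_event_type1.
 */
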
static void
io_valid_test(void)
{
	struct spdk_bdev bdev;

	memset(&bdev, 0, sizeof(bdev));

	bdev.blocklen = 512;
	spdk_spin_init(&bdev.internal.spinlock);

	spdk_bdev_notify_blockcnt_change(&bdev, 100);

	/* All parameters valid */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 1, 2) == true);

	/* Last valid block */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 1) == true);

	/* Offset past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 100, 1) == false);

	/* Offset + length past end of bdev */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 99, 2) == false);

	/* Offset near end of uint64_t range (2^64 - 1), so offset + length would overflow */
	CU_ASSERT(bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);

	spdk_spin_destroy(&bdev.internal.spinlock);
}

static void
alias_add_del_test(void)
{
	struct spdk_bdev *bdev[3];
	int rc;

	ut_init_bdev(NULL);

	/* Creating and registering bdevs */
	bdev[0] = allocate_bdev("bdev0");
	SPDK_CU_ASSERT_FATAL(bdev[0] != 0);

	bdev[1] = allocate_bdev("bdev1");
	SPDK_CU_ASSERT_FATAL(bdev[1] != 0);

	bdev[2] = allocate_bdev("bdev2");
	SPDK_CU_ASSERT_FATAL(bdev[2] != 0);

	poll_threads();

	/*
	 * Trying to add an alias identical to the bdev's name.
	 * This must fail, since an alias cannot duplicate an existing name.
	 */
	rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
	CU_ASSERT(rc == -EEXIST);

	/*
	 * Trying to add an empty (NULL) alias,
	 * this one should fail
	 */
	rc = spdk_bdev_alias_add(bdev[0], NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Trying to add the same alias to two different registered bdevs */

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias was already added to another bdev, so this one should fail */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
	CU_ASSERT(rc == -EEXIST);

	/* Alias is used for the first time, so this one should pass */
	rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/* Trying to remove an alias from registered bdevs */

	/* Alias is not on the bdev's aliases list, so this one should fail */
	rc = spdk_bdev_alias_del(bdev[0], "not existing");
	CU_ASSERT(rc == -ENOENT);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
	CU_ASSERT(rc == 0);

	/* Alias is present on the bdev's aliases list, so this one should pass */
	rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
	CU_ASSERT(rc == 0);

	/*
	 * Trying to remove the name instead of an alias: this must fail,
	 * since a name cannot be changed or removed.
	 */
	rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
	CU_ASSERT(rc != 0);

	/* Trying to delete all aliases from an empty alias list */
	spdk_bdev_alias_del_all(bdev[2]);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Trying to delete all aliases from a non-empty alias list */
	rc = spdk_bdev_alias_add(bdev[2], "alias0");
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_alias_add(bdev[2], "alias1");
	CU_ASSERT(rc == 0);
	spdk_bdev_alias_del_all(bdev[2]);
	CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));

	/* Unregister and free bdevs */
	spdk_bdev_unregister(bdev[0], NULL, NULL);
	spdk_bdev_unregister(bdev[1], NULL, NULL);
	spdk_bdev_unregister(bdev[2], NULL, NULL);

	poll_threads();

	free(bdev[0]);
	free(bdev[1]);
	free(bdev[2]);

	ut_fini_bdev();
}

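/*
 * io_done() is the common completion callback for the I/O tests below.  It
 * records the completion in g_io_done/g_io_status and keeps a zcopy-start
 * bdev_io alive in g_zcopy_bdev_io so the matching commit/release can be
 * issued later; every other bdev_io is freed immediately.
 */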
static void
io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	g_io_done = true;
	g_io_status = bdev_io->internal.status;
	if ((bdev_io->type == SPDK_BDEV_IO_TYPE_ZCOPY) &&
	    (bdev_io->u.bdev.zcopy.start)) {
		g_zcopy_bdev_io = bdev_io;
	} else {
		spdk_bdev_free_io(bdev_io);
		g_zcopy_bdev_io = NULL;
	}
}

struct bdev_ut_io_wait_entry {
	struct spdk_bdev_io_wait_entry	entry;
	struct spdk_io_channel		*io_ch;
	struct spdk_bdev_desc		*desc;
	bool				submitted;
};

static void
io_wait_cb(void *arg)
{
	struct bdev_ut_io_wait_entry *entry = arg;
	int rc;

	rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	entry->submitted = true;
}

static void
bdev_io_types_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	/* WRITE and WRITE ZEROES are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, false);
	rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 0, 128, io_done, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE, true);

	/* NVME_IO, NVME_IO_MD and NVME_ADMIN are not supported */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, false);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, false);
	rc = spdk_bdev_nvme_io_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_io_passthru_md(desc, io_ch, NULL, NULL, 0, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	rc = spdk_bdev_nvme_admin_passthru(desc, io_ch, NULL, NULL, 0, NULL, NULL);
	CU_ASSERT(rc == -ENOTSUP);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_IO_MD, true);
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_NVME_ADMIN, true);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

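/*
 * With bdev_io_pool_size forced down to 4, a fifth submission fails with
 * -ENOMEM and the caller is expected to park itself with
 * spdk_bdev_queue_io_wait(), as bdev_io_wait_test() below demonstrates.
 * A minimal retry sketch (ctx is a hypothetical caller structure):
 *
 *	entry.bdev = bdev;
 *	entry.cb_fn = io_wait_cb;	// cb_fn resubmits the I/O
 *	entry.cb_arg = &ctx;
 *	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &entry);
 */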
static void
bdev_io_wait_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct bdev_ut_io_wait_entry io_wait_entry;
	struct bdev_ut_io_wait_entry io_wait_entry2;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 4;
	bdev_opts.bdev_io_cache_size = 2;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);

	rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
	CU_ASSERT(rc == -ENOMEM);

	io_wait_entry.entry.bdev = bdev;
	io_wait_entry.entry.cb_fn = io_wait_cb;
	io_wait_entry.entry.cb_arg = &io_wait_entry;
	io_wait_entry.io_ch = io_ch;
	io_wait_entry.desc = desc;
	io_wait_entry.submitted = false;
	/* Cannot use the same io_wait_entry for two different calls. */
	memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
	io_wait_entry2.entry.cb_arg = &io_wait_entry2;

	/* Queue two I/O waits. */
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry.submitted == false);
	rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
	CU_ASSERT(rc == 0);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry.submitted == true);
	CU_ASSERT(io_wait_entry2.submitted == false);

	stub_complete_io(1);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
	CU_ASSERT(io_wait_entry2.submitted == true);

	stub_complete_io(4);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	spdk_put_io_channel(io_ch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

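/*
 * bdev_io_should_split() decides from the limits exercised below:
 * split_on_optimal_io_boundary/optimal_io_boundary for boundary crossings,
 * max_segment_size and max_num_segments for payload geometry, and
 * split_on_write_unit/write_unit_size for writes.
 */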
static void
bdev_io_spans_split_test(void)
{
	struct spdk_bdev bdev;
	struct spdk_bdev_io bdev_io;
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV];

	memset(&bdev, 0, sizeof(bdev));
	bdev_io.u.bdev.iovs = iov;

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev.optimal_io_boundary = 0;
	bdev.max_segment_size = 0;
	bdev.max_num_segments = 0;
	bdev_io.bdev = &bdev;

	/* bdev has no optimal_io_boundary and no max sizes set - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = true;
	bdev.optimal_io_boundary = 32;
	bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;

	/* RESETs are not based on LBAs - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
	bdev_io.u.bdev.offset_blocks = 0;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 33;

	/* This I/O spans a boundary. */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.num_blocks = 32;
	bdev.max_segment_size = 512 * 32;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 1;
	iov[0].iov_len = 512;

	/* Does not cross a boundary and does not exceed max_size or max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev.split_on_optimal_io_boundary = false;
	bdev.max_segment_size = 512;
	bdev.max_num_segments = 1;
	bdev_io.u.bdev.iovcnt = 2;

	/* Exceeds max_segs */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_num_segments = 2;
	iov[0].iov_len = 513;
	iov[1].iov_len = 512;

	/* Exceeds max_segment_size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev.max_segment_size = 0;
	bdev.write_unit_size = 32;
	bdev.split_on_write_unit = true;
	bdev_io.type = SPDK_BDEV_IO_TYPE_WRITE;

	/* This I/O is one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == false);

	bdev_io.u.bdev.num_blocks = 32 * 2;

	/* This I/O is more than one write unit */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);

	bdev_io.u.bdev.offset_blocks = 1;
	bdev_io.u.bdev.num_blocks = 32;

	/* This I/O is not aligned to the write unit size */
	CU_ASSERT(bdev_io_should_split(&bdev_io) == true);
}

static void
bdev_io_boundary_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_opts bdev_opts = {};
	struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
	struct ut_expected_io *expected_io;
	void *md_buf = (void *)0xFF000000;
	uint64_t i;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	ut_init_bdev(&bdev_opts);

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	io_ch = spdk_bdev_get_io_channel(desc);
	CU_ASSERT(io_ch != NULL);

	bdev->optimal_io_boundary = 16;
	bdev->split_on_optimal_io_boundary = false;

	g_io_done = false;

	/* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	bdev->split_on_optimal_io_boundary = true;
	bdev->md_interleave = false;
	bdev->md_len = 8;

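	/* With separate metadata (md_interleave == false) and md_len == 8, each
	 * child I/O's metadata pointer is expected to advance 8 bytes per data
	 * block, e.g. the second child of the next split starts at md_buf + 2 * 8.
	 */
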
	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * Set up the expected values before calling spdk_bdev_read_blocks
	 */
	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	/* spdk_bdev_read_blocks will submit the first child immediately. */
	rc = spdk_bdev_read_blocks_with_md(desc, io_ch, (void *)0xF000, md_buf,
					   14, 8, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Now set up a more complex, multi-vector command that needs to be split,
	 * including splitting iovecs.
	 */
	iov[0].iov_base = (void *)0x10000;
	iov[0].iov_len = 512;
	iov[1].iov_base = (void *)0x20000;
	iov[1].iov_len = 20 * 512;
	iov[2].iov_base = (void *)0x30000;
	iov[2].iov_len = 11 * 512;

	g_io_done = false;
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
	expected_io->md_buf = md_buf;
	ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
	expected_io->md_buf = md_buf + 2 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
	expected_io->md_buf = md_buf + 18 * 8;
	ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
	ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_writev_blocks_with_md(desc, io_ch, iov, 3, md_buf,
					     14, 32, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_io_done == false);

	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
	stub_complete_io(3);
	CU_ASSERT(g_io_done == true);

	/* Test multi vector command that needs to be split by strip and then needs to be
	 * split further due to the capacity of child iovs.
	 */
1506 */ 1507 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 1508 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1509 iov[i].iov_len = 512; 1510 } 1511 1512 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1513 g_io_done = false; 1514 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV, 1515 SPDK_BDEV_IO_NUM_CHILD_IOV); 1516 expected_io->md_buf = md_buf; 1517 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1518 ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512); 1519 } 1520 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1521 1522 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1523 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV); 1524 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1525 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1526 ut_expected_io_set_iov(expected_io, i, 1527 (void *)((i + 1 + SPDK_BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512); 1528 } 1529 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1530 1531 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1532 0, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL); 1533 CU_ASSERT(rc == 0); 1534 CU_ASSERT(g_io_done == false); 1535 1536 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1537 stub_complete_io(1); 1538 CU_ASSERT(g_io_done == false); 1539 1540 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1541 stub_complete_io(1); 1542 CU_ASSERT(g_io_done == true); 1543 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1544 1545 /* Test multi vector command that needs to be split by strip and then needs to be 1546 * split further due to the capacity of child iovs. In this case, the length of 1547 * the rest of iovec array with an I/O boundary is the multiple of block size. 1548 */ 1549 1550 /* Fill iovec array for exactly one boundary. The iovec cnt for this boundary 1551 * is SPDK_BDEV_IO_NUM_CHILD_IOV + 1, which exceeds the capacity of child iovs. 
1552 */ 1553 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1554 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1555 iov[i].iov_len = 512; 1556 } 1557 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1558 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1559 iov[i].iov_len = 256; 1560 } 1561 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1562 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 512; 1563 1564 /* Add an extra iovec to trigger split */ 1565 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1566 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1567 1568 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1569 g_io_done = false; 1570 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1571 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV); 1572 expected_io->md_buf = md_buf; 1573 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) { 1574 ut_expected_io_set_iov(expected_io, i, 1575 (void *)((i + 1) * 0x10000), 512); 1576 } 1577 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) { 1578 ut_expected_io_set_iov(expected_io, i, 1579 (void *)((i + 1) * 0x10000), 256); 1580 } 1581 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1582 1583 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1584 1, 1); 1585 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1586 ut_expected_io_set_iov(expected_io, 0, 1587 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 512); 1588 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1589 1590 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1591 1, 1); 1592 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1593 ut_expected_io_set_iov(expected_io, 0, 1594 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1595 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1596 1597 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, md_buf, 1598 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1599 CU_ASSERT(rc == 0); 1600 CU_ASSERT(g_io_done == false); 1601 1602 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1603 stub_complete_io(1); 1604 CU_ASSERT(g_io_done == false); 1605 1606 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1607 stub_complete_io(2); 1608 CU_ASSERT(g_io_done == true); 1609 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1610 1611 /* Test multi vector command that needs to be split by strip and then needs to be 1612 * split further due to the capacity of child iovs, the child request offset should 1613 * be rewind to last aligned offset and go success without error. 
1614 */ 1615 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1616 iov[i].iov_base = (void *)((i + 1) * 0x10000); 1617 iov[i].iov_len = 512; 1618 } 1619 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000); 1620 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256; 1621 1622 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000); 1623 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256; 1624 1625 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000); 1626 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512; 1627 1628 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 1629 g_io_done = false; 1630 g_io_status = 0; 1631 /* The first expected io should be start from offset 0 to SPDK_BDEV_IO_NUM_CHILD_IOV - 1 */ 1632 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1633 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, SPDK_BDEV_IO_NUM_CHILD_IOV - 1); 1634 expected_io->md_buf = md_buf; 1635 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) { 1636 ut_expected_io_set_iov(expected_io, i, 1637 (void *)((i + 1) * 0x10000), 512); 1638 } 1639 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1640 /* The second expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV - 1 to SPDK_BDEV_IO_NUM_CHILD_IOV */ 1641 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 1, 1642 1, 2); 1643 expected_io->md_buf = md_buf + (SPDK_BDEV_IO_NUM_CHILD_IOV - 1) * 8; 1644 ut_expected_io_set_iov(expected_io, 0, 1645 (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000), 256); 1646 ut_expected_io_set_iov(expected_io, 1, 1647 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000), 256); 1648 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1649 /* The third expected io should be start from offset SPDK_BDEV_IO_NUM_CHILD_IOV to SPDK_BDEV_IO_NUM_CHILD_IOV + 1 */ 1650 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 1651 1, 1); 1652 expected_io->md_buf = md_buf + SPDK_BDEV_IO_NUM_CHILD_IOV * 8; 1653 ut_expected_io_set_iov(expected_io, 0, 1654 (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000), 512); 1655 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1656 1657 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, md_buf, 1658 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL); 1659 CU_ASSERT(rc == 0); 1660 CU_ASSERT(g_io_done == false); 1661 1662 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 1663 stub_complete_io(1); 1664 CU_ASSERT(g_io_done == false); 1665 1666 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 1667 stub_complete_io(2); 1668 CU_ASSERT(g_io_done == true); 1669 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 1670 1671 /* Test multi vector command that needs to be split due to the IO boundary and 1672 * the capacity of child iovs. Especially test the case when the command is 1673 * split due to the capacity of child iovs, the tail address is not aligned with 1674 * block size and is rewinded to the aligned address. 1675 * 1676 * The iovecs used in read request is complex but is based on the data 1677 * collected in the real issue. We change the base addresses but keep the lengths 1678 * not to loose the credibility of the test. 
1679 */ 1680 bdev->optimal_io_boundary = 128; 1681 g_io_done = false; 1682 g_io_status = 0; 1683 1684 for (i = 0; i < 31; i++) { 1685 iov[i].iov_base = (void *)(0xFEED0000000 + (i << 20)); 1686 iov[i].iov_len = 1024; 1687 } 1688 iov[31].iov_base = (void *)0xFEED1F00000; 1689 iov[31].iov_len = 32768; 1690 iov[32].iov_base = (void *)0xFEED2000000; 1691 iov[32].iov_len = 160; 1692 iov[33].iov_base = (void *)0xFEED2100000; 1693 iov[33].iov_len = 4096; 1694 iov[34].iov_base = (void *)0xFEED2200000; 1695 iov[34].iov_len = 4096; 1696 iov[35].iov_base = (void *)0xFEED2300000; 1697 iov[35].iov_len = 4096; 1698 iov[36].iov_base = (void *)0xFEED2400000; 1699 iov[36].iov_len = 4096; 1700 iov[37].iov_base = (void *)0xFEED2500000; 1701 iov[37].iov_len = 4096; 1702 iov[38].iov_base = (void *)0xFEED2600000; 1703 iov[38].iov_len = 4096; 1704 iov[39].iov_base = (void *)0xFEED2700000; 1705 iov[39].iov_len = 4096; 1706 iov[40].iov_base = (void *)0xFEED2800000; 1707 iov[40].iov_len = 4096; 1708 iov[41].iov_base = (void *)0xFEED2900000; 1709 iov[41].iov_len = 4096; 1710 iov[42].iov_base = (void *)0xFEED2A00000; 1711 iov[42].iov_len = 4096; 1712 iov[43].iov_base = (void *)0xFEED2B00000; 1713 iov[43].iov_len = 12288; 1714 iov[44].iov_base = (void *)0xFEED2C00000; 1715 iov[44].iov_len = 8192; 1716 iov[45].iov_base = (void *)0xFEED2F00000; 1717 iov[45].iov_len = 4096; 1718 iov[46].iov_base = (void *)0xFEED3000000; 1719 iov[46].iov_len = 4096; 1720 iov[47].iov_base = (void *)0xFEED3100000; 1721 iov[47].iov_len = 4096; 1722 iov[48].iov_base = (void *)0xFEED3200000; 1723 iov[48].iov_len = 24576; 1724 iov[49].iov_base = (void *)0xFEED3300000; 1725 iov[49].iov_len = 16384; 1726 iov[50].iov_base = (void *)0xFEED3400000; 1727 iov[50].iov_len = 12288; 1728 iov[51].iov_base = (void *)0xFEED3500000; 1729 iov[51].iov_len = 4096; 1730 iov[52].iov_base = (void *)0xFEED3600000; 1731 iov[52].iov_len = 4096; 1732 iov[53].iov_base = (void *)0xFEED3700000; 1733 iov[53].iov_len = 4096; 1734 iov[54].iov_base = (void *)0xFEED3800000; 1735 iov[54].iov_len = 28672; 1736 iov[55].iov_base = (void *)0xFEED3900000; 1737 iov[55].iov_len = 20480; 1738 iov[56].iov_base = (void *)0xFEED3A00000; 1739 iov[56].iov_len = 4096; 1740 iov[57].iov_base = (void *)0xFEED3B00000; 1741 iov[57].iov_len = 12288; 1742 iov[58].iov_base = (void *)0xFEED3C00000; 1743 iov[58].iov_len = 4096; 1744 iov[59].iov_base = (void *)0xFEED3D00000; 1745 iov[59].iov_len = 4096; 1746 iov[60].iov_base = (void *)0xFEED3E00000; 1747 iov[60].iov_len = 352; 1748 1749 /* The 1st child IO must be from iov[0] to iov[31] split by the capacity 1750 * of child iovs, 1751 */ 1752 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 126, 32); 1753 expected_io->md_buf = md_buf; 1754 for (i = 0; i < 32; i++) { 1755 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len); 1756 } 1757 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 1758 1759 /* The 2nd child IO must be from iov[32] to the first 864 bytes of iov[33] 1760 * split by the IO boundary requirement. 
1761 */
1762 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 126, 2, 2);
1763 expected_io->md_buf = md_buf + 126 * 8;
1764 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base, iov[32].iov_len);
1765 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 864);
1766 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1767
1768 /* The 3rd child IO must be from the remaining 3232 bytes of iov[33] to
1769 * the first 864 bytes of iov[46] split by the IO boundary requirement.
1770 */
1771 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 128, 128, 14);
1772 expected_io->md_buf = md_buf + 128 * 8;
1773 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[33].iov_base + 864),
1774 iov[33].iov_len - 864);
1775 ut_expected_io_set_iov(expected_io, 1, iov[34].iov_base, iov[34].iov_len);
1776 ut_expected_io_set_iov(expected_io, 2, iov[35].iov_base, iov[35].iov_len);
1777 ut_expected_io_set_iov(expected_io, 3, iov[36].iov_base, iov[36].iov_len);
1778 ut_expected_io_set_iov(expected_io, 4, iov[37].iov_base, iov[37].iov_len);
1779 ut_expected_io_set_iov(expected_io, 5, iov[38].iov_base, iov[38].iov_len);
1780 ut_expected_io_set_iov(expected_io, 6, iov[39].iov_base, iov[39].iov_len);
1781 ut_expected_io_set_iov(expected_io, 7, iov[40].iov_base, iov[40].iov_len);
1782 ut_expected_io_set_iov(expected_io, 8, iov[41].iov_base, iov[41].iov_len);
1783 ut_expected_io_set_iov(expected_io, 9, iov[42].iov_base, iov[42].iov_len);
1784 ut_expected_io_set_iov(expected_io, 10, iov[43].iov_base, iov[43].iov_len);
1785 ut_expected_io_set_iov(expected_io, 11, iov[44].iov_base, iov[44].iov_len);
1786 ut_expected_io_set_iov(expected_io, 12, iov[45].iov_base, iov[45].iov_len);
1787 ut_expected_io_set_iov(expected_io, 13, iov[46].iov_base, 864);
1788 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1789
1790 /* The 4th child IO must be from the remaining 3232 bytes of iov[46] to the
1791 * first 864 bytes of iov[52] split by the IO boundary requirement.
1792 */
1793 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 256, 128, 7);
1794 expected_io->md_buf = md_buf + 256 * 8;
1795 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[46].iov_base + 864),
1796 iov[46].iov_len - 864);
1797 ut_expected_io_set_iov(expected_io, 1, iov[47].iov_base, iov[47].iov_len);
1798 ut_expected_io_set_iov(expected_io, 2, iov[48].iov_base, iov[48].iov_len);
1799 ut_expected_io_set_iov(expected_io, 3, iov[49].iov_base, iov[49].iov_len);
1800 ut_expected_io_set_iov(expected_io, 4, iov[50].iov_base, iov[50].iov_len);
1801 ut_expected_io_set_iov(expected_io, 5, iov[51].iov_base, iov[51].iov_len);
1802 ut_expected_io_set_iov(expected_io, 6, iov[52].iov_base, 864);
1803 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1804
1805 /* The 5th child IO must be from the remaining 3232 bytes of iov[52] to
1806 * the first 4960 bytes of iov[57] split by the IO boundary requirement.
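 * (3232 + 4096 + 28672 + 20480 + 4096 + 4960 = 65536 bytes, another full 128-block window.)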
1807 */
1808 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 384, 128, 6);
1809 expected_io->md_buf = md_buf + 384 * 8;
1810 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[52].iov_base + 864),
1811 iov[52].iov_len - 864);
1812 ut_expected_io_set_iov(expected_io, 1, iov[53].iov_base, iov[53].iov_len);
1813 ut_expected_io_set_iov(expected_io, 2, iov[54].iov_base, iov[54].iov_len);
1814 ut_expected_io_set_iov(expected_io, 3, iov[55].iov_base, iov[55].iov_len);
1815 ut_expected_io_set_iov(expected_io, 4, iov[56].iov_base, iov[56].iov_len);
1816 ut_expected_io_set_iov(expected_io, 5, iov[57].iov_base, 4960);
1817 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1818
1819 /* The 6th child IO must be from the remaining 7328 bytes of iov[57]
1820 * to the first 3936 bytes of iov[59] split by the capacity of child iovs.
1821 */
1822 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 512, 30, 3);
1823 expected_io->md_buf = md_buf + 512 * 8;
1824 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[57].iov_base + 4960),
1825 iov[57].iov_len - 4960);
1826 ut_expected_io_set_iov(expected_io, 1, iov[58].iov_base, iov[58].iov_len);
1827 ut_expected_io_set_iov(expected_io, 2, iov[59].iov_base, 3936);
1828 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1829
1830 /* The 7th child IO is from the remaining 160 bytes of iov[59] and iov[60]. */
1831 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 542, 1, 2);
1832 expected_io->md_buf = md_buf + 542 * 8;
1833 ut_expected_io_set_iov(expected_io, 0, (void *)((uintptr_t)iov[59].iov_base + 3936),
1834 iov[59].iov_len - 3936);
1835 ut_expected_io_set_iov(expected_io, 1, iov[60].iov_base, iov[60].iov_len);
1836 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1837
1838 rc = spdk_bdev_readv_blocks_with_md(desc, io_ch, iov, 61, md_buf,
1839 0, 543, io_done, NULL);
1840 CU_ASSERT(rc == 0);
1841 CU_ASSERT(g_io_done == false);
1842
1843 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1844 stub_complete_io(1);
1845 CU_ASSERT(g_io_done == false);
1846
1847 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1848 stub_complete_io(5);
1849 CU_ASSERT(g_io_done == false);
1850
1851 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1852 stub_complete_io(1);
1853 CU_ASSERT(g_io_done == true);
1854 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
1855 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1856
1857 /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
1858 * split, so test that.
1859 */
1860 bdev->optimal_io_boundary = 15;
1861 g_io_done = false;
1862 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
1863 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1864
1865 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
1866 CU_ASSERT(rc == 0);
1867 CU_ASSERT(g_io_done == false);
1868 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1869 stub_complete_io(1);
1870 CU_ASSERT(g_io_done == true);
1871
1872 /* Test an UNMAP. This should also not be split.
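 * The UNMAP below spans the optimal_io_boundary at block 16 yet is still expected to reach the driver as a single child IO.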
 */
1873 bdev->optimal_io_boundary = 16;
1874 g_io_done = false;
1875 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
1876 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1877
1878 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
1879 CU_ASSERT(rc == 0);
1880 CU_ASSERT(g_io_done == false);
1881 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1882 stub_complete_io(1);
1883 CU_ASSERT(g_io_done == true);
1884
1885 /* Test a FLUSH. This should also not be split. */
1886 bdev->optimal_io_boundary = 16;
1887 g_io_done = false;
1888 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
1889 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1890
1891 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
1892 CU_ASSERT(rc == 0);
1893 CU_ASSERT(g_io_done == false);
1894 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1895 stub_complete_io(1);
1896 CU_ASSERT(g_io_done == true);
1897
1898 /* Test a COPY. This should also not be split. */
1899 bdev->optimal_io_boundary = 15;
1900 g_io_done = false;
1901 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
1902 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
1903
1904 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
1905 CU_ASSERT(rc == 0);
1906 CU_ASSERT(g_io_done == false);
1907 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1908 stub_complete_io(1);
1909 CU_ASSERT(g_io_done == true);
1910
1911 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
1912
1913 /* Child requests return an error status */
1914 bdev->optimal_io_boundary = 16;
1915 iov[0].iov_base = (void *)0x10000;
1916 iov[0].iov_len = 512 * 64;
1917 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1918 g_io_done = false;
1919 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1920
1921 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 1, 64, io_done, NULL);
1922 CU_ASSERT(rc == 0);
1923 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 5);
1924 stub_complete_io(4);
1925 CU_ASSERT(g_io_done == false);
1926 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1927 stub_complete_io(1);
1928 CU_ASSERT(g_io_done == true);
1929 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1930
1931 /* Test that a multi-vector command is terminated with failure, rather than continuing
1932 * the splitting process, when one of its child I/Os fails.
1933 * The multi-vector command is the same as above: it needs to be split on the optimal IO boundary
1934 * and then split further due to the capacity of child iovs.
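 * Failing the first child IO should complete the parent with SPDK_BDEV_IO_STATUS_FAILED without submitting the remaining children.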
1935 */
1936 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 1; i++) {
1937 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1938 iov[i].iov_len = 512;
1939 }
1940 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(SPDK_BDEV_IO_NUM_CHILD_IOV * 0x10000);
1941 iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
1942
1943 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 0x10000);
1944 iov[SPDK_BDEV_IO_NUM_CHILD_IOV].iov_len = 256;
1945
1946 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_base = (void *)((SPDK_BDEV_IO_NUM_CHILD_IOV + 2) * 0x10000);
1947 iov[SPDK_BDEV_IO_NUM_CHILD_IOV + 1].iov_len = 512;
1948
1949 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV;
1950
1951 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED;
1952 g_io_done = false;
1953 g_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1954
1955 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0,
1956 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
1957 CU_ASSERT(rc == 0);
1958 CU_ASSERT(g_io_done == false);
1959
1960 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
1961 stub_complete_io(1);
1962 CU_ASSERT(g_io_done == true);
1963 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
1964
1965 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;
1966
1967 /* For this test we will create the following conditions to hit the code path where
1968 * we are trying to send an IO following a split that has no iovs because we had to
1969 * trim them for alignment reasons.
1970 *
1971 * - 16K boundary, our IO will start at offset 0 with a length of 0x4200
1972 * - Our IOVs are 0x212 in size so that we run into the 16K boundary at child IOV
1973 * position 30 and overshoot by 0x2e.
1974 * - That means we'll send the IO and loop back to pick up the remaining bytes at
1975 * child IOV index 31. When we do, we find that we have to shorten index 31 by 0x2e,
1976 * which eliminates that vector, so we just send the first split IO with 31 vectors
1977 * (the last one shortened) and let the completion pick up the last 2 vectors.
1978 */
1979 bdev->optimal_io_boundary = 32;
1980 bdev->split_on_optimal_io_boundary = true;
1981 g_io_done = false;
1982
1983 /* Init all parent IOVs to 0x212 */
1984 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
1985 iov[i].iov_base = (void *)((i + 1) * 0x10000);
1986 iov[i].iov_len = 0x212;
1987 }
1988
1989 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, SPDK_BDEV_IO_NUM_CHILD_IOV,
1990 SPDK_BDEV_IO_NUM_CHILD_IOV - 1);
1991 /* expect 0-29 to be 1:1 with the parent iov */
1992 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
1993 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
1994 }
1995
1996 /* expect index 30 to be shortened to 0x1e4 (0x212 - 0x2e) because of the alignment,
1997 * where 0x2e is the amount we overshot the 16K boundary
1998 */
1999 ut_expected_io_set_iov(expected_io, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2000 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base), 0x1e4);
2001 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2002
2003 /* The 2nd child IO will have 2 remaining vectors, one picking up from the one that was
2004 * shortened and taking it to the next boundary, and then a final one to get us to
2005 * 0x4200 bytes for the IO.
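 * (0x2e + 0x1d2 = 0x200 bytes, i.e. exactly one 512-byte block.)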
2006 */
2007 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV,
2008 1, 2);
2009 /* position 30 picks up the remaining bytes to the next boundary */
2010 ut_expected_io_set_iov(expected_io, 0,
2011 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 2].iov_base + 0x1e4), 0x2e);
2012
2013 /* position 31 picks up the rest of the transfer to get us to 0x4200 */
2014 ut_expected_io_set_iov(expected_io, 1,
2015 (void *)(iov[SPDK_BDEV_IO_NUM_CHILD_IOV - 1].iov_base), 0x1d2);
2016 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2017
2018 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, 0,
2019 SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2020 CU_ASSERT(rc == 0);
2021 CU_ASSERT(g_io_done == false);
2022
2023 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2024 stub_complete_io(1);
2025 CU_ASSERT(g_io_done == false);
2026
2027 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2028 stub_complete_io(1);
2029 CU_ASSERT(g_io_done == true);
2030 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2031
2032 spdk_put_io_channel(io_ch);
2033 spdk_bdev_close(desc);
2034 free_bdev(bdev);
2035 ut_fini_bdev();
2036 }
2037
2038 static void
2039 bdev_io_max_size_and_segment_split_test(void)
2040 {
2041 struct spdk_bdev *bdev;
2042 struct spdk_bdev_desc *desc = NULL;
2043 struct spdk_io_channel *io_ch;
2044 struct spdk_bdev_opts bdev_opts = {};
2045 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2046 struct ut_expected_io *expected_io;
2047 uint64_t i;
2048 int rc;
2049
2050 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2051 bdev_opts.bdev_io_pool_size = 512;
2052 bdev_opts.bdev_io_cache_size = 64;
2053 bdev_opts.opts_size = sizeof(bdev_opts);
2054 ut_init_bdev(&bdev_opts);
2055
2056 bdev = allocate_bdev("bdev0");
2057
2058 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2059 CU_ASSERT(rc == 0);
2060 SPDK_CU_ASSERT_FATAL(desc != NULL);
2061 io_ch = spdk_bdev_get_io_channel(desc);
2062 CU_ASSERT(io_ch != NULL);
2063
2064 bdev->split_on_optimal_io_boundary = false;
2065 bdev->optimal_io_boundary = 0;
2066
2067 /* Case 0: max_num_segments == 0,
2068 * but the segment size 2 * 512 > max_segment_size (512)
2069 */
2070 bdev->max_segment_size = 512;
2071 bdev->max_num_segments = 0;
2072 g_io_done = false;
2073
2074 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2075 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2076 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2077 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2078
2079 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL);
2080 CU_ASSERT(rc == 0);
2081 CU_ASSERT(g_io_done == false);
2082
2083 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2084 stub_complete_io(1);
2085 CU_ASSERT(g_io_done == true);
2086 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2087
2088 /* Case 1: max_segment_size == 0,
2089 * but the iov count 2 > max_num_segments (1).
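 * With max_num_segments = 1, the two iovs below become two single-iov child IOs of 1 and 8 blocks.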
2090 */ 2091 bdev->max_segment_size = 0; 2092 bdev->max_num_segments = 1; 2093 g_io_done = false; 2094 2095 iov[0].iov_base = (void *)0x10000; 2096 iov[0].iov_len = 512; 2097 iov[1].iov_base = (void *)0x20000; 2098 iov[1].iov_len = 8 * 512; 2099 2100 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2101 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, iov[0].iov_len); 2102 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2103 2104 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 8, 1); 2105 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, iov[1].iov_len); 2106 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2107 2108 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 14, 9, io_done, NULL); 2109 CU_ASSERT(rc == 0); 2110 CU_ASSERT(g_io_done == false); 2111 2112 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2113 stub_complete_io(2); 2114 CU_ASSERT(g_io_done == true); 2115 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2116 2117 /* Test that a non-vector command is split correctly. 2118 * Set up the expected values before calling spdk_bdev_read_blocks 2119 */ 2120 bdev->max_segment_size = 512; 2121 bdev->max_num_segments = 1; 2122 g_io_done = false; 2123 2124 /* Child IO 0 */ 2125 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 1, 1); 2126 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512); 2127 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2128 2129 /* Child IO 1 */ 2130 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1); 2131 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 1 * 512), 512); 2132 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2133 2134 /* spdk_bdev_read_blocks will submit the first child immediately. */ 2135 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 2, io_done, NULL); 2136 CU_ASSERT(rc == 0); 2137 CU_ASSERT(g_io_done == false); 2138 2139 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 2140 stub_complete_io(2); 2141 CU_ASSERT(g_io_done == true); 2142 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2143 2144 /* Now set up a more complex, multi-vector command that needs to be split, 2145 * including splitting iovecs. 
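 * With max_segment_size = 2 * 512 and max_num_segments = 1, the 2 + 4 + 6 block iovs below become six 2-block, single-iov child writes.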
2146 */
2147 bdev->max_segment_size = 2 * 512;
2148 bdev->max_num_segments = 1;
2149 g_io_done = false;
2150
2151 iov[0].iov_base = (void *)0x10000;
2152 iov[0].iov_len = 2 * 512;
2153 iov[1].iov_base = (void *)0x20000;
2154 iov[1].iov_len = 4 * 512;
2155 iov[2].iov_base = (void *)0x30000;
2156 iov[2].iov_len = 6 * 512;
2157
2158 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2159 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 2);
2160 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2161
2162 /* iov[1] is split into 2 segments, each of which becomes its own child IO */
2163 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2164 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base, 512 * 2);
2165 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2166
2167 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 18, 2, 1);
2168 ut_expected_io_set_iov(expected_io, 0, iov[1].iov_base + 512 * 2, 512 * 2);
2169 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2170
2171 /* iov[2] is split into 3 segments, each of which becomes its own child IO */
2172 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 20, 2, 1);
2173 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base, 512 * 2);
2174 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2175
2176 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 22, 2, 1);
2177 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 2, 512 * 2);
2178 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2179
2180 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 24, 2, 1);
2181 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 4, 512 * 2);
2182 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2183
2184 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 12, io_done, NULL);
2185 CU_ASSERT(rc == 0);
2186 CU_ASSERT(g_io_done == false);
2187
2188 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6);
2189 stub_complete_io(6);
2190 CU_ASSERT(g_io_done == true);
2191 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2192
2193 /* Test a multi-vector command that needs to be split by the segment limits and then
2194 * split further due to the capacity of the parent IO's child iovs.
2195 */
2196 bdev->max_segment_size = 512;
2197 bdev->max_num_segments = 1;
2198 g_io_done = false;
2199
2200 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2201 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2202 iov[i].iov_len = 512 * 2;
2203 }
2204
2205 /* Each input iov is split into 2 segments,
2206 * so half of the input iovs consume all child iov entries of a single split round.
2207 */
2208 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i++) {
2209 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i, 1, 1);
2210 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2211 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2212
2213 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2 * i + 1, 1, 1);
2214 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2215 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2216 }
2217
2218 /* The remaining iovs are split in the second round */
2219 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV / 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2220 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 1, 1);
2221 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, 512);
2222 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2223
2224 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2 + 1, 1, 1);
2225 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base + 512, 512);
2226 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2227 }
2228
2229 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2230 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
2231 CU_ASSERT(rc == 0);
2232 CU_ASSERT(g_io_done == false);
2233
2234 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2235 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2236 CU_ASSERT(g_io_done == false);
2237
2238 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2239 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2240 CU_ASSERT(g_io_done == true);
2241 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2242
2243 /* An error case: a child IO produced by the split is
2244 * not a multiple of the block size,
2245 * so the request exits with an error
2246 */
2247 bdev->max_segment_size = 512;
2248 bdev->max_num_segments = 1;
2249 g_io_done = false;
2250
2251 iov[0].iov_base = (void *)0x10000;
2252 iov[0].iov_len = 512 + 256;
2253 iov[1].iov_base = (void *)0x20000;
2254 iov[1].iov_len = 256;
2255
2256 /* iov[0] is split into 512 and 256 bytes.
2257 * 256 is less than a block, and the next round of the split
2258 * finds that the first child IO is smaller than
2259 * the block size, so it exits with an error
2260 */
2261 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 1, 1);
2262 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512);
2263 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2264
2265 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 2, 0, 2, io_done, NULL);
2266 CU_ASSERT(rc == 0);
2267 CU_ASSERT(g_io_done == false);
2268
2269 /* First child IO is OK */
2270 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2271 stub_complete_io(1);
2272 CU_ASSERT(g_io_done == true);
2273 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2274
2275 /* error exit */
2276 stub_complete_io(1);
2277 CU_ASSERT(g_io_done == true);
2278 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
2279 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2280
2281 /* Test a multi-vector command that needs to be split by the segment limits and then
2282 * split further due to the capacity of child iovs.
2283 *
2284 * In this case, the last two iovs need to be split, but that would exceed the capacity
2285 * of child iovs, so the remainder has to wait until the first batch completes.
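 * (SPDK_BDEV_IO_NUM_CHILD_IOV - 2 single-block iovs plus two 2-block iovs split in half yield SPDK_BDEV_IO_NUM_CHILD_IOV + 2 child iov entries, two more than one child IO can carry.)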
2286 */
2287 bdev->max_segment_size = 512;
2288 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2289 g_io_done = false;
2290
2291 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2292 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2293 iov[i].iov_len = 512;
2294 }
2295 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV; i++) {
2296 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2297 iov[i].iov_len = 512 * 2;
2298 }
2299
2300 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2301 SPDK_BDEV_IO_NUM_CHILD_IOV, SPDK_BDEV_IO_NUM_CHILD_IOV);
2302 /* 0 ~ (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) will not be split */
2303 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2304 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2305 }
2306 /* (SPDK_BDEV_IO_NUM_CHILD_IOV - 2) is split */
2307 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, 512);
2308 ut_expected_io_set_iov(expected_io, i + 1, iov[i].iov_base + 512, 512);
2309 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2310
2311 /* The child iov entries exceed the parent IO's maximum, so the rest is split into the next round */
2312 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV, 2, 2);
2313 ut_expected_io_set_iov(expected_io, 0, iov[i + 1].iov_base, 512);
2314 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base + 512, 512);
2315 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2316
2317 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV, 0,
2318 SPDK_BDEV_IO_NUM_CHILD_IOV + 2, io_done, NULL);
2319 CU_ASSERT(rc == 0);
2320 CU_ASSERT(g_io_done == false);
2321
2322 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2323 stub_complete_io(1);
2324 CU_ASSERT(g_io_done == false);
2325
2326 /* Next round */
2327 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2328 stub_complete_io(1);
2329 CU_ASSERT(g_io_done == true);
2330 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2331
2332 /* This case is similar to the previous one, but the IO composed of
2333 * the last few child iov entries is not enough for a full block, so it
2334 * cannot be put into this IO and must wait for the next one.
2335 */
2336 bdev->max_segment_size = 512;
2337 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2338 g_io_done = false;
2339
2340 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2341 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2342 iov[i].iov_len = 512;
2343 }
2344
2345 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2346 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2347 iov[i].iov_len = 128;
2348 }
2349
2350 /* The first child iovcnt isn't SPDK_BDEV_IO_NUM_CHILD_IOV but SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2351 * because the 2 iovs left over are not enough for a full block.
2352 */
2353 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0,
2354 SPDK_BDEV_IO_NUM_CHILD_IOV - 2, SPDK_BDEV_IO_NUM_CHILD_IOV - 2);
2355 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2356 ut_expected_io_set_iov(expected_io, i, iov[i].iov_base, iov[i].iov_len);
2357 }
2358 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2359
2360 /* The second child IO waits until the first child IO completes before executing,
2361 * because the combined iovcnt of the two IOs exceeds the child iovcnt of the parent IO.
2362 * It covers parent iovs SPDK_BDEV_IO_NUM_CHILD_IOV - 2 to SPDK_BDEV_IO_NUM_CHILD_IOV + 2
2363 */
2364 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_NUM_CHILD_IOV - 2,
2365 1, 4);
2366 ut_expected_io_set_iov(expected_io, 0, iov[i].iov_base, iov[i].iov_len);
2367 ut_expected_io_set_iov(expected_io, 1, iov[i + 1].iov_base, iov[i + 1].iov_len);
2368 ut_expected_io_set_iov(expected_io, 2, iov[i + 2].iov_base, iov[i + 2].iov_len);
2369 ut_expected_io_set_iov(expected_io, 3, iov[i + 3].iov_base, iov[i + 3].iov_len);
2370 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2371
2372 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2373 SPDK_BDEV_IO_NUM_CHILD_IOV - 1, io_done, NULL);
2374 CU_ASSERT(rc == 0);
2375 CU_ASSERT(g_io_done == false);
2376
2377 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2378 stub_complete_io(1);
2379 CU_ASSERT(g_io_done == false);
2380
2381 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2382 stub_complete_io(1);
2383 CU_ASSERT(g_io_done == true);
2384 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2385
2386 /* A very complicated case. Each sg entry exceeds max_segment_size and
2387 * needs to be split, each child IO must be a multiple of the block length,
2388 * and the child iovcnt exceeds the parent iovcnt.
2389 */
2390 bdev->max_segment_size = 512 + 128;
2391 bdev->max_num_segments = 3;
2392 g_io_done = false;
2393
2394 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i++) {
2395 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2396 iov[i].iov_len = 512 + 256;
2397 }
2398
2399 for (i = SPDK_BDEV_IO_NUM_CHILD_IOV - 2; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 2; i++) {
2400 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2401 iov[i].iov_len = 512 + 128;
2402 }
2403
2404 /* Each for() round generates 3 child IOs and uses 9 child iov entries, 3 * 9 = 27 in total.
2405 * Each round also consumes 4 parent IO iov entries and 6 blocks.
2406 * 9 child IOs are generated by the loop.
2407 */
2408 for (i = 0; i < 3; i++) {
2409 uint32_t j = i * 4;
2410 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6, 2, 3);
2411 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2412 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2413 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2414 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2415
2416 /* Each child IO must be a multiple of the block length, so
2417 * iov[j + 2] must be split. If a third entry were added,
2418 * a multiple of the block length could not be guaranteed, but the trimmed
2419 * piece still occupies one iov entry of the parent's child iov accounting.
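 * (512 + 512 = 1024 bytes = 2 blocks; appending the remaining 256 bytes of iov[j + 2] would give 1280 bytes, which is not a block multiple.)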
2420 */
2421 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 2, 2, 2);
2422 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2423 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2424 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2425
2426 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 6 + 4, 2, 3);
2427 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2428 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2429 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2430 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2431 }
2432
2433 /* Child iov position is at 27 for the 10th child IO;
2434 * its parent iov entry index is 3 * 4 and its block offset is 3 * 6
2435 */
2436 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 18, 2, 3);
2437 ut_expected_io_set_iov(expected_io, 0, iov[12].iov_base, 640);
2438 ut_expected_io_set_iov(expected_io, 1, iov[12].iov_base + 640, 128);
2439 ut_expected_io_set_iov(expected_io, 2, iov[13].iov_base, 256);
2440 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2441
2442 /* Child iov position is at 30 for the 11th child IO */
2443 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 20, 2, 2);
2444 ut_expected_io_set_iov(expected_io, 0, iov[13].iov_base + 256, 512);
2445 ut_expected_io_set_iov(expected_io, 1, iov[14].iov_base, 512);
2446 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2447
2448 /* The 2nd split round starts with iovpos 0; this is the 12th child IO */
2449 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 22, 2, 3);
2450 ut_expected_io_set_iov(expected_io, 0, iov[14].iov_base + 512, 256);
2451 ut_expected_io_set_iov(expected_io, 1, iov[15].iov_base, 640);
2452 ut_expected_io_set_iov(expected_io, 2, iov[15].iov_base + 640, 128);
2453 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2454
2455 /* Generate 9 more child IOs, consuming 27 more child iov entries.
2456 * Each for() round consumes 4 parent IO iov entries and 6 blocks.
2457 * The parent IO iov index starts from 16 and the block offset starts from 24
2458 */
2459 for (i = 0; i < 3; i++) {
2460 uint32_t j = i * 4 + 16;
2461 uint32_t offset = i * 6 + 24;
2462 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, 2, 3);
2463 ut_expected_io_set_iov(expected_io, 0, iov[j].iov_base, 640);
2464 ut_expected_io_set_iov(expected_io, 1, iov[j].iov_base + 640, 128);
2465 ut_expected_io_set_iov(expected_io, 2, iov[j + 1].iov_base, 256);
2466 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2467
2468 /* Each child IO must be a multiple of the block length, so
2469 * iov[j + 2] must be split. If a third entry were added,
2470 * a multiple of the block length could not be guaranteed, but the trimmed
2471 * piece still occupies one iov entry of the parent's child iov accounting.
2472 */
2473 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 2, 2, 2);
2474 ut_expected_io_set_iov(expected_io, 0, iov[j + 1].iov_base + 256, 512);
2475 ut_expected_io_set_iov(expected_io, 1, iov[j + 2].iov_base, 512);
2476 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2477
2478 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset + 4, 2, 3);
2479 ut_expected_io_set_iov(expected_io, 0, iov[j + 2].iov_base + 512, 256);
2480 ut_expected_io_set_iov(expected_io, 1, iov[j + 3].iov_base, 640);
2481 ut_expected_io_set_iov(expected_io, 2, iov[j + 3].iov_base + 640, 128);
2482 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2483 }
2484
2485 /* The 22nd child IO, child iov position at 30 */
2486 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 42, 1, 1);
2487 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base, 512);
2488 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2489
2490 /* The third round */
2491 /* Here is the 23rd child IO and child iovpos is 0 */
2492 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 43, 2, 3);
2493 ut_expected_io_set_iov(expected_io, 0, iov[28].iov_base + 512, 256);
2494 ut_expected_io_set_iov(expected_io, 1, iov[29].iov_base, 640);
2495 ut_expected_io_set_iov(expected_io, 2, iov[29].iov_base + 640, 128);
2496 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2497
2498 /* The 24th child IO */
2499 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 45, 3, 3);
2500 ut_expected_io_set_iov(expected_io, 0, iov[30].iov_base, 640);
2501 ut_expected_io_set_iov(expected_io, 1, iov[31].iov_base, 640);
2502 ut_expected_io_set_iov(expected_io, 2, iov[32].iov_base, 256);
2503 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2504
2505 /* The 25th child IO */
2506 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 48, 2, 2);
2507 ut_expected_io_set_iov(expected_io, 0, iov[32].iov_base + 256, 384);
2508 ut_expected_io_set_iov(expected_io, 1, iov[33].iov_base, 640);
2509 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2510
2511 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV + 2, 0,
2512 50, io_done, NULL);
2513 CU_ASSERT(rc == 0);
2514 CU_ASSERT(g_io_done == false);
2515
2516 /* The parent IO supports up to 32 child iovs, so at most
2517 * 11 child IOs can be split off at a time; the
2518 * splitting continues after the first batch completes.
2519 */
2520 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2521 stub_complete_io(11);
2522 CU_ASSERT(g_io_done == false);
2523
2524 /* The 2nd round */
2525 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 11);
2526 stub_complete_io(11);
2527 CU_ASSERT(g_io_done == false);
2528
2529 /* The last round */
2530 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2531 stub_complete_io(3);
2532 CU_ASSERT(g_io_done == true);
2533 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2534
2535 /* Test a WRITE_ZEROES. This should also not be split.
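 * WRITE_ZEROES carries no data buffer, so max_segment_size and max_num_segments do not apply; a single child IO is expected.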
 */
2536 bdev->max_segment_size = 512;
2537 bdev->max_num_segments = 1;
2538 g_io_done = false;
2539
2540 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
2541 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2542
2543 rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
2544 CU_ASSERT(rc == 0);
2545 CU_ASSERT(g_io_done == false);
2546 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2547 stub_complete_io(1);
2548 CU_ASSERT(g_io_done == true);
2549
2550 /* Test an UNMAP. This should also not be split. */
2551 g_io_done = false;
2552
2553 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 4, 0);
2554 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2555
2556 rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 4, io_done, NULL);
2557 CU_ASSERT(rc == 0);
2558 CU_ASSERT(g_io_done == false);
2559 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2560 stub_complete_io(1);
2561 CU_ASSERT(g_io_done == true);
2562
2563 /* Test a FLUSH. This should also not be split. */
2564 g_io_done = false;
2565
2566 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 4, 0);
2567 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2568
2569 rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 4, io_done, NULL);
2570 CU_ASSERT(rc == 0);
2571 CU_ASSERT(g_io_done == false);
2572 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2573 stub_complete_io(1);
2574 CU_ASSERT(g_io_done == true);
2575
2576 /* Test a COPY. This should also not be split. */
2577 g_io_done = false;
2578
2579 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 9, 45, 36);
2580 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2581
2582 rc = spdk_bdev_copy_blocks(desc, io_ch, 9, 45, 36, io_done, NULL);
2583 CU_ASSERT(rc == 0);
2584 CU_ASSERT(g_io_done == false);
2585 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2586 stub_complete_io(1);
2587 CU_ASSERT(g_io_done == true);
2588
2589 /* Test that IOs are split on max_rw_size */
2590 bdev->max_rw_size = 2;
2591 bdev->max_segment_size = 0;
2592 bdev->max_num_segments = 0;
2593 g_io_done = false;
2594
2595 /* 5 blocks in a contiguous buffer */
2596 iov[0].iov_base = (void *)0x10000;
2597 iov[0].iov_len = 5 * 512;
2598
2599 /* First: offset=0, num_blocks=2 */
2600 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2601 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2602 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2603 /* Second: offset=2, num_blocks=2 */
2604 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 2, 1);
2605 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 2 * 512);
2606 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2607 /* Third: offset=4, num_blocks=1 */
2608 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2609 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 4 * 512, 512);
2610 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2611
2612 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 5, io_done, NULL);
2613 CU_ASSERT(rc == 0);
2614 CU_ASSERT(g_io_done == false);
2615
2616 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2617 stub_complete_io(3);
2618 CU_ASSERT(g_io_done == true);
2619 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2620
2621 /* Check splitting on both max_rw_size + max_num_segments */
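/* 5 blocks spread over 4 iovs, with at most 2 blocks and 2 segments per child IO, should yield 4 child IOs of 2, 1, 1 and 1 blocks. */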
2622 bdev->max_rw_size = 2;
2623 bdev->max_num_segments = 2;
2624 bdev->max_segment_size = 0;
2625 g_io_done = false;
2626
2627 /* 5 blocks split across 4 iovs */
2628 iov[0].iov_base = (void *)0x10000;
2629 iov[0].iov_len = 3 * 512;
2630 iov[1].iov_base = (void *)0x20000;
2631 iov[1].iov_len = 256;
2632 iov[2].iov_base = (void *)0x30000;
2633 iov[2].iov_len = 256;
2634 iov[3].iov_base = (void *)0x40000;
2635 iov[3].iov_len = 512;
2636
2637 /* First: offset=0, num_blocks=2, iovcnt=1 */
2638 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 2, 1);
2639 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 2 * 512);
2640 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2641 /* Second: offset=2, num_blocks=1, iovcnt=1 (max_num_segments prevents submitting
2642 * the rest of iov[0] together with iov[1]+iov[2])
2643 */
2644 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 2, 1, 1);
2645 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + 2 * 512, 512);
2646 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2647 /* Third: offset=3, num_blocks=1, iovcnt=2 (iov[1]+iov[2]) */
2648 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 3, 1, 2);
2649 ut_expected_io_set_iov(expected_io, 0, (void *)0x20000, 256);
2650 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 256);
2651 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2652 /* Fourth: offset=4, num_blocks=1, iovcnt=1 (iov[3]) */
2653 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 4, 1, 1);
2654 ut_expected_io_set_iov(expected_io, 0, (void *)0x40000, 512);
2655 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2656
2657 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 4, 0, 5, io_done, NULL);
2658 CU_ASSERT(rc == 0);
2659 CU_ASSERT(g_io_done == false);
2660
2661 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
2662 stub_complete_io(4);
2663 CU_ASSERT(g_io_done == true);
2664 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2665
2666 /* Check splitting on both max_rw_size + max_segment_size */
2667 bdev->max_rw_size = 2;
2668 bdev->max_segment_size = 512;
2669 bdev->max_num_segments = 0;
2670 g_io_done = false;
2671
2672 /* 6 blocks in a contiguous buffer */
2673 iov[0].iov_base = (void *)0x10000;
2674 iov[0].iov_len = 6 * 512;
2675
2676 /* We expect 3 IOs each with 2 blocks and 2 iovs */
2677 for (i = 0; i < 3; ++i) {
2678 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i * 2, 2, 2);
2679 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 2 * 512, 512);
2680 ut_expected_io_set_iov(expected_io, 1, (void *)0x10000 + i * 2 * 512 + 512, 512);
2681 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2682 }
2683
2684 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, 6, io_done, NULL);
2685 CU_ASSERT(rc == 0);
2686 CU_ASSERT(g_io_done == false);
2687
2688 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2689 stub_complete_io(3);
2690 CU_ASSERT(g_io_done == true);
2691 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2692
2693 /* Check splitting on max_rw_size limited by SPDK_BDEV_IO_NUM_CHILD_IOV */
2694 bdev->max_rw_size = 1;
2695 bdev->max_segment_size = 0;
2696 bdev->max_num_segments = 0;
2697 g_io_done = false;
2698
2699 /* SPDK_BDEV_IO_NUM_CHILD_IOV + 1 blocks */
2700 iov[0].iov_base = (void *)0x10000;
2701 iov[0].iov_len = (SPDK_BDEV_IO_NUM_CHILD_IOV + 1) * 512;
2702
2703 /* We expect SPDK_BDEV_IO_NUM_CHILD_IOV + 1 IOs each with a single iov */
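/* With max_rw_size = 1 every block becomes its own single-iov child IO; only SPDK_BDEV_IO_NUM_CHILD_IOV of them are in flight per split round. */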
2704 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV + 1; ++i) {
2705 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, i, 1, 1);
2706 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000 + i * 512, 512);
2707 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2708 }
2709
2710 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, 1, 0, SPDK_BDEV_IO_NUM_CHILD_IOV + 1, io_done, NULL);
2711 CU_ASSERT(rc == 0);
2712 CU_ASSERT(g_io_done == false);
2713
2714 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == SPDK_BDEV_IO_NUM_CHILD_IOV);
2715 stub_complete_io(SPDK_BDEV_IO_NUM_CHILD_IOV);
2716 CU_ASSERT(g_io_done == false);
2717 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
2718 stub_complete_io(1);
2719 CU_ASSERT(g_io_done == true);
2720 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2721
2722 spdk_put_io_channel(io_ch);
2723 spdk_bdev_close(desc);
2724 free_bdev(bdev);
2725 ut_fini_bdev();
2726 }
2727
2728 static void
2729 bdev_io_mix_split_test(void)
2730 {
2731 struct spdk_bdev *bdev;
2732 struct spdk_bdev_desc *desc = NULL;
2733 struct spdk_io_channel *io_ch;
2734 struct spdk_bdev_opts bdev_opts = {};
2735 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2];
2736 struct ut_expected_io *expected_io;
2737 uint64_t i;
2738 int rc;
2739
2740 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
2741 bdev_opts.bdev_io_pool_size = 512;
2742 bdev_opts.bdev_io_cache_size = 64;
2743 ut_init_bdev(&bdev_opts);
2744
2745 bdev = allocate_bdev("bdev0");
2746
2747 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc);
2748 CU_ASSERT(rc == 0);
2749 SPDK_CU_ASSERT_FATAL(desc != NULL);
2750 io_ch = spdk_bdev_get_io_channel(desc);
2751 CU_ASSERT(io_ch != NULL);
2752
2753 /* First case optimal_io_boundary == max_segment_size * max_num_segments */
2754 bdev->split_on_optimal_io_boundary = true;
2755 bdev->optimal_io_boundary = 16;
2756
2757 bdev->max_segment_size = 512;
2758 bdev->max_num_segments = 16;
2759 g_io_done = false;
2760
2761 /* An IO crossing the IO boundary requires a split.
2762 * Total 2 child IOs.
2763 */
2764
2765 /* The 1st child IO is split into multiple segment entries by max_segment_size */
2766 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 2);
2767 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512);
2768 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 512), 512);
2769 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2770
2771 /* The 2nd child IO is split into multiple segment entries by max_segment_size */
2772 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 2);
2773 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 512);
2774 ut_expected_io_set_iov(expected_io, 1, (void *)(0xF000 + 3 * 512), 512);
2775 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2776
2777 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 4, io_done, NULL);
2778 CU_ASSERT(rc == 0);
2779 CU_ASSERT(g_io_done == false);
2780
2781 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2782 stub_complete_io(2);
2783 CU_ASSERT(g_io_done == true);
2784 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2785
2786 /* Second case optimal_io_boundary > max_segment_size * max_num_segments */
2787 bdev->max_segment_size = 15 * 512;
2788 bdev->max_num_segments = 1;
2789 g_io_done = false;
2790
2791 /* An IO crossing the IO boundary requires a split.
2792 * The 1st child IO's segment size exceeds max_segment_size,
2793 * so the 1st child IO is split into multiple segment entries.
2794 * Then it splits into 2 child IOs because of max_num_segments.
2795 * Total 3 child IOs.
2796 */
2797
2798 /* The first 2 IOs are within one IO boundary.
2799 * Because optimal_io_boundary > max_segment_size * max_num_segments,
2800 * the boundary window is split into the first 2 IOs.
2801 */
2802 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 15, 1);
2803 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 15);
2804 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2805
2806 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 15, 1, 1);
2807 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 15), 512);
2808 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2809
2810 /* The 3rd child IO exists because of the IO boundary */
2811 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2812 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2813 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2814
2815 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2816 CU_ASSERT(rc == 0);
2817 CU_ASSERT(g_io_done == false);
2818
2819 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2820 stub_complete_io(3);
2821 CU_ASSERT(g_io_done == true);
2822 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2823
2824 /* Third case optimal_io_boundary < max_segment_size * max_num_segments */
2825 bdev->max_segment_size = 17 * 512;
2826 bdev->max_num_segments = 1;
2827 g_io_done = false;
2828
2829 /* An IO crossing the IO boundary requires a split.
2830 * The child IOs are not split further.
2831 * Total 2 child IOs.
2832 */
2833
2834 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 16, 1);
2835 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 512 * 16);
2836 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2837
2838 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 2, 1);
2839 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 512 * 16), 512 * 2);
2840 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2841
2842 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 0, 18, io_done, NULL);
2843 CU_ASSERT(rc == 0);
2844 CU_ASSERT(g_io_done == false);
2845
2846 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
2847 stub_complete_io(2);
2848 CU_ASSERT(g_io_done == true);
2849 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2850
2851 /* Now set up a more complex, multi-vector command that needs to be split,
2852 * including splitting iovecs.
2853 * optimal_io_boundary < max_segment_size * max_num_segments
2854 */
2855 bdev->max_segment_size = 3 * 512;
2856 bdev->max_num_segments = 6;
2857 g_io_done = false;
2858
2859 iov[0].iov_base = (void *)0x10000;
2860 iov[0].iov_len = 4 * 512;
2861 iov[1].iov_base = (void *)0x20000;
2862 iov[1].iov_len = 4 * 512;
2863 iov[2].iov_base = (void *)0x30000;
2864 iov[2].iov_len = 10 * 512;
2865
2866 /* An IO crossing the IO boundary requires a split.
2867 * The 1st child IO's segment size exceeds max_segment_size, and after
2868 * splitting by segment size, the number of segments exceeds max_num_segments.
2869 * So the 1st child IO is split into 2 child IOs.
2870 * Total 3 child IOs.
2871 */
2872
2873 /* The first 2 IOs are within one IO boundary.
2874 * After splitting by segment size, the segment count exceeds max_num_segments,
2875 * so the window splits into 2 child IOs.
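 * (The 16-block window would need 7 segment entries, one more than max_num_segments = 6, so it is sent as a 14-block child followed by a 2-block child.)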
2876 */
2877 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 14, 6);
2878 ut_expected_io_set_iov(expected_io, 0, iov[0].iov_base, 512 * 3);
2879 ut_expected_io_set_iov(expected_io, 1, iov[0].iov_base + 512 * 3, 512);
2880 ut_expected_io_set_iov(expected_io, 2, iov[1].iov_base, 512 * 3);
2881 ut_expected_io_set_iov(expected_io, 3, iov[1].iov_base + 512 * 3, 512);
2882 ut_expected_io_set_iov(expected_io, 4, iov[2].iov_base, 512 * 3);
2883 ut_expected_io_set_iov(expected_io, 5, iov[2].iov_base + 512 * 3, 512 * 3);
2884 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2885
2886 /* The 2nd child IO picks up the leftover segment entries */
2887 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1);
2888 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 6, 512 * 2);
2889 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2890
2891 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 2, 1);
2892 ut_expected_io_set_iov(expected_io, 0, iov[2].iov_base + 512 * 8, 512 * 2);
2893 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2894
2895 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 0, 18, io_done, NULL);
2896 CU_ASSERT(rc == 0);
2897 CU_ASSERT(g_io_done == false);
2898
2899 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
2900 stub_complete_io(3);
2901 CU_ASSERT(g_io_done == true);
2902 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
2903
2904 /* A very complicated case. Each sg entry exceeds max_segment_size
2905 * and the IO is split on the IO boundary.
2906 * optimal_io_boundary < max_segment_size * max_num_segments
2907 */
2908 bdev->max_segment_size = 3 * 512;
2909 bdev->max_num_segments = SPDK_BDEV_IO_NUM_CHILD_IOV;
2910 g_io_done = false;
2911
2912 for (i = 0; i < 20; i++) {
2913 iov[i].iov_base = (void *)((i + 1) * 0x10000);
2914 iov[i].iov_len = 512 * 4;
2915 }
2916
2917 /* An IO crossing the IO boundary requires a split.
2918 * An 80-block IO splits into 5 child IOs based on the offset and IO boundary.
2919 * Each iov entry needs to be split into 2 entries because of max_segment_size.
2920 * Total 5 child IOs.
2921 */
2922
2923 /* 4 iov entries fall within each IO boundary and each iov entry splits into 2,
2924 * so each child IO occupies 8 child iov entries.
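 * (32 child iov entries / 8 per child IO = 4 child IOs per split round, matching the 4 + 1 completions asserted below.)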
2925 */
2926 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 16, 8);
2927 for (i = 0; i < 4; i++) {
2928 int iovcnt = i * 2;
2929 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2930 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2931 }
2932 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2933
2934 /* The 2nd child IO; 16 child iov entries of the parent IO used in total */
2935 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 8);
2936 for (i = 4; i < 8; i++) {
2937 int iovcnt = (i - 4) * 2;
2938 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2939 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2940 }
2941 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2942
2943 /* The 3rd child IO; 24 child iov entries of the parent IO used in total */
2944 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 16, 8);
2945 for (i = 8; i < 12; i++) {
2946 int iovcnt = (i - 8) * 2;
2947 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2948 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2949 }
2950 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2951
2952 /* The 4th child IO; all 32 child iov entries of the parent IO used */
2953 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 48, 16, 8);
2954 for (i = 12; i < 16; i++) {
2955 int iovcnt = (i - 12) * 2;
2956 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3);
2957 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512);
2958 }
2959 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
2960
2961 /* The 5th child IO; because of the child iov entry limit it is split off
2962 * into the next round.
2963 */ 2964 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 64, 16, 8); 2965 for (i = 16; i < 20; i++) { 2966 int iovcnt = (i - 16) * 2; 2967 ut_expected_io_set_iov(expected_io, iovcnt, iov[i].iov_base, 512 * 3); 2968 ut_expected_io_set_iov(expected_io, iovcnt + 1, iov[i].iov_base + 512 * 3, 512); 2969 } 2970 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 2971 2972 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 20, 0, 80, io_done, NULL); 2973 CU_ASSERT(rc == 0); 2974 CU_ASSERT(g_io_done == false); 2975 2976 /* First split round */ 2977 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 2978 stub_complete_io(4); 2979 CU_ASSERT(g_io_done == false); 2980 2981 /* Second split round */ 2982 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 2983 stub_complete_io(1); 2984 CU_ASSERT(g_io_done == true); 2985 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 2986 2987 spdk_put_io_channel(io_ch); 2988 spdk_bdev_close(desc); 2989 free_bdev(bdev); 2990 ut_fini_bdev(); 2991 } 2992 2993 static void 2994 bdev_io_split_with_io_wait(void) 2995 { 2996 struct spdk_bdev *bdev; 2997 struct spdk_bdev_desc *desc = NULL; 2998 struct spdk_io_channel *io_ch; 2999 struct spdk_bdev_channel *channel; 3000 struct spdk_bdev_mgmt_channel *mgmt_ch; 3001 struct spdk_bdev_opts bdev_opts = {}; 3002 struct iovec iov[3]; 3003 struct ut_expected_io *expected_io; 3004 int rc; 3005 3006 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3007 bdev_opts.bdev_io_pool_size = 2; 3008 bdev_opts.bdev_io_cache_size = 1; 3009 ut_init_bdev(&bdev_opts); 3010 3011 bdev = allocate_bdev("bdev0"); 3012 3013 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3014 CU_ASSERT(rc == 0); 3015 CU_ASSERT(desc != NULL); 3016 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3017 io_ch = spdk_bdev_get_io_channel(desc); 3018 CU_ASSERT(io_ch != NULL); 3019 channel = spdk_io_channel_get_ctx(io_ch); 3020 mgmt_ch = channel->shared_resource->mgmt_ch; 3021 3022 bdev->optimal_io_boundary = 16; 3023 bdev->split_on_optimal_io_boundary = true; 3024 3025 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL); 3026 CU_ASSERT(rc == 0); 3027 3028 /* Now test that a single-vector command is split correctly. 3029 * Offset 14, length 8, payload 0xF000 3030 * Child - Offset 14, length 2, payload 0xF000 3031 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 3032 * 3033 * Set up the expected values before calling spdk_bdev_read_blocks 3034 */ 3035 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 3036 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 3037 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3038 3039 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 3040 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 3041 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3042 3043 /* The following children will be submitted sequentially due to the capacity of 3044 * spdk_bdev_io. 
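 * (bdev_io_pool_size is only 2: the parent holds one spdk_bdev_io, so only one child can be in flight at a time.)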
3045 */ 3046 3047 /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */ 3048 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL); 3049 CU_ASSERT(rc == 0); 3050 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3051 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3052 3053 /* Completing the first read I/O will submit the first child */ 3054 stub_complete_io(1); 3055 CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 3056 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3057 3058 /* Completing the first child will submit the second child */ 3059 stub_complete_io(1); 3060 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3061 3062 /* Complete the second child I/O. This should result in our callback getting 3063 * invoked since the parent I/O is now complete. 3064 */ 3065 stub_complete_io(1); 3066 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3067 3068 /* Now set up a more complex, multi-vector command that needs to be split, 3069 * including splitting iovecs. 3070 */ 3071 iov[0].iov_base = (void *)0x10000; 3072 iov[0].iov_len = 512; 3073 iov[1].iov_base = (void *)0x20000; 3074 iov[1].iov_len = 20 * 512; 3075 iov[2].iov_base = (void *)0x30000; 3076 iov[2].iov_len = 11 * 512; 3077 3078 g_io_done = false; 3079 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2); 3080 ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512); 3081 ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512); 3082 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3083 3084 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1); 3085 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512); 3086 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3087 3088 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2); 3089 ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512); 3090 ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512); 3091 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3092 3093 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL); 3094 CU_ASSERT(rc == 0); 3095 CU_ASSERT(g_io_done == false); 3096 3097 /* The following children will be submitted sequentially due to the capacity of 3098 * spdk_bdev_io. 3099 */ 3100 3101 /* Completing the first child will submit the second child */ 3102 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3103 stub_complete_io(1); 3104 CU_ASSERT(g_io_done == false); 3105 3106 /* Completing the second child will submit the third child */ 3107 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3108 stub_complete_io(1); 3109 CU_ASSERT(g_io_done == false); 3110 3111 /* Completing the third child will result in our callback getting invoked 3112 * since the parent I/O is now complete. 
3113 */ 3114 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 3115 stub_complete_io(1); 3116 CU_ASSERT(g_io_done == true); 3117 3118 CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io)); 3119 3120 spdk_put_io_channel(io_ch); 3121 spdk_bdev_close(desc); 3122 free_bdev(bdev); 3123 ut_fini_bdev(); 3124 } 3125 3126 static void 3127 bdev_io_write_unit_split_test(void) 3128 { 3129 struct spdk_bdev *bdev; 3130 struct spdk_bdev_desc *desc = NULL; 3131 struct spdk_io_channel *io_ch; 3132 struct spdk_bdev_opts bdev_opts = {}; 3133 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 4]; 3134 struct ut_expected_io *expected_io; 3135 uint64_t i; 3136 int rc; 3137 3138 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3139 bdev_opts.bdev_io_pool_size = 512; 3140 bdev_opts.bdev_io_cache_size = 64; 3141 ut_init_bdev(&bdev_opts); 3142 3143 bdev = allocate_bdev("bdev0"); 3144 3145 rc = spdk_bdev_open_ext(bdev->name, true, bdev_ut_event_cb, NULL, &desc); 3146 CU_ASSERT(rc == 0); 3147 SPDK_CU_ASSERT_FATAL(desc != NULL); 3148 io_ch = spdk_bdev_get_io_channel(desc); 3149 CU_ASSERT(io_ch != NULL); 3150 3151 /* Write I/O 2x larger than write_unit_size should get split into 2 I/Os */ 3152 bdev->write_unit_size = 32; 3153 bdev->split_on_write_unit = true; 3154 g_io_done = false; 3155 3156 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, 32, 1); 3157 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 32 * 512); 3158 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3159 3160 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 32, 1); 3161 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 32 * 512), 32 * 512); 3162 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3163 3164 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3165 CU_ASSERT(rc == 0); 3166 CU_ASSERT(g_io_done == false); 3167 3168 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3169 stub_complete_io(2); 3170 CU_ASSERT(g_io_done == true); 3171 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3172 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3173 3174 /* Same as above but with optimal_io_boundary < write_unit_size - the I/O should be split 3175 * based on write_unit_size, not optimal_io_boundary */ 3176 bdev->split_on_optimal_io_boundary = true; 3177 bdev->optimal_io_boundary = 16; 3178 g_io_done = false; 3179 3180 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 64, io_done, NULL); 3181 CU_ASSERT(rc == 0); 3182 CU_ASSERT(g_io_done == false); 3183 3184 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3185 stub_complete_io(2); 3186 CU_ASSERT(g_io_done == true); 3187 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3188 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3189 3190 /* Write I/O should fail if it is smaller than write_unit_size */ 3191 g_io_done = false; 3192 3193 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 31, io_done, NULL); 3194 CU_ASSERT(rc == 0); 3195 CU_ASSERT(g_io_done == false); 3196 3197 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3198 poll_threads(); 3199 CU_ASSERT(g_io_done == true); 3200 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3201 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3202 3203 /* Same for I/O not aligned to write_unit_size */ 3204 g_io_done = false; 3205 3206 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 1, 32, io_done, NULL); 3207 CU_ASSERT(rc == 0); 3208 CU_ASSERT(g_io_done == false); 
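/* The parent I/O is failed without ever reaching the driver; its completion callback runs on the next poll. */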
3209 3210 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3211 poll_threads(); 3212 CU_ASSERT(g_io_done == true); 3213 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3214 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3215 3216 /* Write should fail if it needs to be split but there are not enough iovs to submit 3217 * an entire write unit */ 3218 bdev->write_unit_size = SPDK_COUNTOF(iov) / 2; 3219 g_io_done = false; 3220 3221 for (i = 0; i < SPDK_COUNTOF(iov); i++) { 3222 iov[i].iov_base = (void *)(0x1000 + 512 * i); 3223 iov[i].iov_len = 512; 3224 } 3225 3226 rc = spdk_bdev_writev_blocks(desc, io_ch, iov, SPDK_COUNTOF(iov), 0, SPDK_COUNTOF(iov), 3227 io_done, NULL); 3228 CU_ASSERT(rc == 0); 3229 CU_ASSERT(g_io_done == false); 3230 3231 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3232 poll_threads(); 3233 CU_ASSERT(g_io_done == true); 3234 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 3235 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 3236 3237 spdk_put_io_channel(io_ch); 3238 spdk_bdev_close(desc); 3239 free_bdev(bdev); 3240 ut_fini_bdev(); 3241 } 3242 3243 static void 3244 bdev_io_alignment(void) 3245 { 3246 struct spdk_bdev *bdev; 3247 struct spdk_bdev_desc *desc = NULL; 3248 struct spdk_io_channel *io_ch; 3249 struct spdk_bdev_opts bdev_opts = {}; 3250 int rc; 3251 void *buf = NULL; 3252 struct iovec iovs[2]; 3253 int iovcnt; 3254 uint64_t alignment; 3255 3256 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3257 bdev_opts.bdev_io_pool_size = 20; 3258 bdev_opts.bdev_io_cache_size = 2; 3259 ut_init_bdev(&bdev_opts); 3260 3261 fn_table.submit_request = stub_submit_request_get_buf; 3262 bdev = allocate_bdev("bdev0"); 3263 3264 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3265 CU_ASSERT(rc == 0); 3266 CU_ASSERT(desc != NULL); 3267 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3268 io_ch = spdk_bdev_get_io_channel(desc); 3269 CU_ASSERT(io_ch != NULL); 3270 3271 /* Create aligned buffer */ 3272 rc = posix_memalign(&buf, 4096, 8192); 3273 SPDK_CU_ASSERT_FATAL(rc == 0); 3274 3275 /* Pass aligned single buffer with no alignment required */ 3276 alignment = 1; 3277 bdev->required_alignment = spdk_u32log2(alignment); 3278 3279 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3280 CU_ASSERT(rc == 0); 3281 stub_complete_io(1); 3282 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3283 alignment)); 3284 3285 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, io_done, NULL); 3286 CU_ASSERT(rc == 0); 3287 stub_complete_io(1); 3288 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3289 alignment)); 3290 3291 /* Pass unaligned single buffer with no alignment required */ 3292 alignment = 1; 3293 bdev->required_alignment = spdk_u32log2(alignment); 3294 3295 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3296 CU_ASSERT(rc == 0); 3297 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3298 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3299 stub_complete_io(1); 3300 3301 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3302 CU_ASSERT(rc == 0); 3303 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3304 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4); 3305 stub_complete_io(1); 3306 3307 /* Pass unaligned single buffer with 512 alignment required */ 3308 alignment = 512; 3309 bdev->required_alignment = spdk_u32log2(alignment); 3310 3311 rc = 
spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3312 CU_ASSERT(rc == 0); 3313 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3314 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3315 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3316 alignment)); 3317 stub_complete_io(1); 3318 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3319 3320 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL); 3321 CU_ASSERT(rc == 0); 3322 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3323 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3324 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3325 alignment)); 3326 stub_complete_io(1); 3327 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3328 3329 /* Pass unaligned single buffer with 4096 alignment required */ 3330 alignment = 4096; 3331 bdev->required_alignment = spdk_u32log2(alignment); 3332 3333 rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3334 CU_ASSERT(rc == 0); 3335 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3336 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3337 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3338 alignment)); 3339 stub_complete_io(1); 3340 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3341 3342 rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL); 3343 CU_ASSERT(rc == 0); 3344 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1); 3345 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3346 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3347 alignment)); 3348 stub_complete_io(1); 3349 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3350 3351 /* Pass aligned iovs with no alignment required */ 3352 alignment = 1; 3353 bdev->required_alignment = spdk_u32log2(alignment); 3354 3355 iovcnt = 1; 3356 iovs[0].iov_base = buf; 3357 iovs[0].iov_len = 512; 3358 3359 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3360 CU_ASSERT(rc == 0); 3361 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3362 stub_complete_io(1); 3363 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3364 3365 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3366 CU_ASSERT(rc == 0); 3367 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3368 stub_complete_io(1); 3369 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3370 3371 /* Pass unaligned iovs with no alignment required */ 3372 alignment = 1; 3373 bdev->required_alignment = spdk_u32log2(alignment); 3374 3375 iovcnt = 2; 3376 iovs[0].iov_base = buf + 16; 3377 iovs[0].iov_len = 256; 3378 iovs[1].iov_base = buf + 16 + 256 + 32; 3379 iovs[1].iov_len = 256; 3380 3381 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3382 CU_ASSERT(rc == 0); 3383 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3384 stub_complete_io(1); 3385 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3386 3387 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3388 CU_ASSERT(rc == 0); 3389 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3390 stub_complete_io(1); 3391 CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base); 3392 3393 /* Pass unaligned iov with 2048 alignment required */ 
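/* Both iov bases below sit at small offsets within the 4096-aligned buffer, so they violate the 2048-byte requirement and the bdev layer is expected to bounce the payload through its internal aligned iov. */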
3394 alignment = 2048; 3395 bdev->required_alignment = spdk_u32log2(alignment); 3396 3397 iovcnt = 2; 3398 iovs[0].iov_base = buf + 16; 3399 iovs[0].iov_len = 256; 3400 iovs[1].iov_base = buf + 16 + 256 + 32; 3401 iovs[1].iov_len = 256; 3402 3403 rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3404 CU_ASSERT(rc == 0); 3405 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt); 3406 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3407 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3408 alignment)); 3409 stub_complete_io(1); 3410 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3411 3412 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3413 CU_ASSERT(rc == 0); 3414 CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt); 3415 CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov); 3416 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3417 alignment)); 3418 stub_complete_io(1); 3419 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3420 3421 /* Pass iov without allocated buffer with no alignment required */ 3422 alignment = 1; 3423 bdev->required_alignment = spdk_u32log2(alignment); 3424 3425 iovcnt = 1; 3426 iovs[0].iov_base = NULL; 3427 iovs[0].iov_len = 0; 3428 3429 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3430 CU_ASSERT(rc == 0); 3431 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3432 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3433 alignment)); 3434 stub_complete_io(1); 3435 3436 /* Pass iov without allocated buffer with 1024 alignment required */ 3437 alignment = 1024; 3438 bdev->required_alignment = spdk_u32log2(alignment); 3439 3440 iovcnt = 1; 3441 iovs[0].iov_base = NULL; 3442 iovs[0].iov_len = 0; 3443 3444 rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL); 3445 CU_ASSERT(rc == 0); 3446 CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false); 3447 CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt, 3448 alignment)); 3449 stub_complete_io(1); 3450 3451 spdk_put_io_channel(io_ch); 3452 spdk_bdev_close(desc); 3453 free_bdev(bdev); 3454 fn_table.submit_request = stub_submit_request; 3455 ut_fini_bdev(); 3456 3457 free(buf); 3458 } 3459 3460 static void 3461 bdev_io_alignment_with_boundary(void) 3462 { 3463 struct spdk_bdev *bdev; 3464 struct spdk_bdev_desc *desc = NULL; 3465 struct spdk_io_channel *io_ch; 3466 struct spdk_bdev_opts bdev_opts = {}; 3467 int rc; 3468 void *buf = NULL; 3469 struct iovec iovs[2]; 3470 int iovcnt; 3471 uint64_t alignment; 3472 3473 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 3474 bdev_opts.bdev_io_pool_size = 20; 3475 bdev_opts.bdev_io_cache_size = 2; 3476 bdev_opts.opts_size = sizeof(bdev_opts); 3477 ut_init_bdev(&bdev_opts); 3478 3479 fn_table.submit_request = stub_submit_request_get_buf; 3480 bdev = allocate_bdev("bdev0"); 3481 3482 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 3483 CU_ASSERT(rc == 0); 3484 CU_ASSERT(desc != NULL); 3485 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3486 io_ch = spdk_bdev_get_io_channel(desc); 3487 CU_ASSERT(io_ch != NULL); 3488 3489 /* Create aligned buffer */ 3490 rc = posix_memalign(&buf, 4096, 131072); 3491 SPDK_CU_ASSERT_FATAL(rc == 0); 3492 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3493 3494 #ifdef NOTDEF 3495 /* 512 * 3 with 2 IO boundary, allocate small data buffer
from bdev layer */ 3496 alignment = 512; 3497 bdev->required_alignment = spdk_u32log2(alignment); 3498 bdev->optimal_io_boundary = 2; 3499 bdev->split_on_optimal_io_boundary = true; 3500 3501 iovcnt = 1; 3502 iovs[0].iov_base = NULL; 3503 iovs[0].iov_len = 512 * 3; 3504 3505 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3506 CU_ASSERT(rc == 0); 3507 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3508 stub_complete_io(2); 3509 3510 /* 8KiB with 16 IO boundary, allocate large data buffer from bdev layer */ 3511 alignment = 512; 3512 bdev->required_alignment = spdk_u32log2(alignment); 3513 bdev->optimal_io_boundary = 16; 3514 bdev->split_on_optimal_io_boundary = true; 3515 3516 iovcnt = 1; 3517 iovs[0].iov_base = NULL; 3518 iovs[0].iov_len = 512 * 16; 3519 3520 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 16, io_done, NULL); 3521 CU_ASSERT(rc == 0); 3522 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3523 stub_complete_io(2); 3524 3525 /* 512 * 160 with 128 IO boundary, 63.5KiB + 16.5KiB for the two child requests */ 3526 alignment = 512; 3527 bdev->required_alignment = spdk_u32log2(alignment); 3528 bdev->optimal_io_boundary = 128; 3529 bdev->split_on_optimal_io_boundary = true; 3530 3531 iovcnt = 1; 3532 iovs[0].iov_base = buf + 16; 3533 iovs[0].iov_len = 512 * 160; 3534 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3535 CU_ASSERT(rc == 0); 3536 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3537 stub_complete_io(2); 3538 3539 #endif 3540 3541 /* 512 * 3 with 2 IO boundary */ 3542 alignment = 512; 3543 bdev->required_alignment = spdk_u32log2(alignment); 3544 bdev->optimal_io_boundary = 2; 3545 bdev->split_on_optimal_io_boundary = true; 3546 3547 iovcnt = 2; 3548 iovs[0].iov_base = buf + 16; 3549 iovs[0].iov_len = 512; 3550 iovs[1].iov_base = buf + 16 + 512 + 32; 3551 iovs[1].iov_len = 1024; 3552 3553 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3554 CU_ASSERT(rc == 0); 3555 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3556 stub_complete_io(2); 3557 3558 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 3, io_done, NULL); 3559 CU_ASSERT(rc == 0); 3560 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 3561 stub_complete_io(2); 3562 3563 /* 512 * 64 with 32 IO boundary */ 3564 bdev->optimal_io_boundary = 32; 3565 iovcnt = 2; 3566 iovs[0].iov_base = buf + 16; 3567 iovs[0].iov_len = 16384; 3568 iovs[1].iov_base = buf + 16 + 16384 + 32; 3569 iovs[1].iov_len = 16384; 3570 3571 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3572 CU_ASSERT(rc == 0); 3573 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3574 stub_complete_io(3); 3575 3576 rc = spdk_bdev_readv_blocks(desc, io_ch, iovs, iovcnt, 1, 64, io_done, NULL); 3577 CU_ASSERT(rc == 0); 3578 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3); 3579 stub_complete_io(3); 3580 3581 /* 512 * 160 with 32 IO boundary */ 3582 iovcnt = 1; 3583 iovs[0].iov_base = buf + 16; 3584 iovs[0].iov_len = 16384 + 65536; 3585 3586 rc = spdk_bdev_writev_blocks(desc, io_ch, iovs, iovcnt, 1, 160, io_done, NULL); 3587 CU_ASSERT(rc == 0); 3588 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 6); 3589 stub_complete_io(6); 3590 3591 spdk_put_io_channel(io_ch); 3592 spdk_bdev_close(desc); 3593 free_bdev(bdev); 3594 fn_table.submit_request = stub_submit_request; 3595 ut_fini_bdev(); 3596 3597 free(buf); 3598 } 3599 3600 static void 3601 histogram_status_cb(void
*cb_arg, int status) 3602 { 3603 g_status = status; 3604 } 3605 3606 static void 3607 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3608 { 3609 g_status = status; 3610 g_histogram = histogram; 3611 } 3612 3613 static void 3614 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count, 3615 uint64_t total, uint64_t so_far) 3616 { 3617 g_count += count; 3618 } 3619 3620 static void 3621 histogram_channel_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram) 3622 { 3623 spdk_histogram_data_fn cb_fn = cb_arg; 3624 3625 g_status = status; 3626 3627 if (status == 0) { 3628 spdk_histogram_data_iterate(histogram, cb_fn, NULL); 3629 } 3630 } 3631 3632 static void 3633 bdev_histograms(void) 3634 { 3635 struct spdk_bdev *bdev; 3636 struct spdk_bdev_desc *desc = NULL; 3637 struct spdk_io_channel *ch; 3638 struct spdk_histogram_data *histogram; 3639 uint8_t buf[4096]; 3640 int rc; 3641 3642 ut_init_bdev(NULL); 3643 3644 bdev = allocate_bdev("bdev"); 3645 3646 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3647 CU_ASSERT(rc == 0); 3648 CU_ASSERT(desc != NULL); 3649 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3650 3651 ch = spdk_bdev_get_io_channel(desc); 3652 CU_ASSERT(ch != NULL); 3653 3654 /* Enable histogram */ 3655 g_status = -1; 3656 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, true); 3657 poll_threads(); 3658 CU_ASSERT(g_status == 0); 3659 CU_ASSERT(bdev->internal.histogram_enabled == true); 3660 3661 /* Allocate histogram */ 3662 histogram = spdk_histogram_data_alloc(); 3663 SPDK_CU_ASSERT_FATAL(histogram != NULL); 3664 3665 /* Check if histogram is zeroed */ 3666 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3667 poll_threads(); 3668 CU_ASSERT(g_status == 0); 3669 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3670 3671 g_count = 0; 3672 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3673 3674 CU_ASSERT(g_count == 0); 3675 3676 rc = spdk_bdev_write_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3677 CU_ASSERT(rc == 0); 3678 3679 spdk_delay_us(10); 3680 stub_complete_io(1); 3681 poll_threads(); 3682 3683 rc = spdk_bdev_read_blocks(desc, ch, buf, 0, 1, io_done, NULL); 3684 CU_ASSERT(rc == 0); 3685 3686 spdk_delay_us(10); 3687 stub_complete_io(1); 3688 poll_threads(); 3689 3690 /* Check if histogram gathered data from all I/O channels */ 3691 g_histogram = NULL; 3692 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3693 poll_threads(); 3694 CU_ASSERT(g_status == 0); 3695 CU_ASSERT(bdev->internal.histogram_enabled == true); 3696 SPDK_CU_ASSERT_FATAL(g_histogram != NULL); 3697 3698 g_count = 0; 3699 spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL); 3700 CU_ASSERT(g_count == 2); 3701 3702 g_count = 0; 3703 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, histogram_io_count); 3704 CU_ASSERT(g_status == 0); 3705 CU_ASSERT(g_count == 2); 3706 3707 /* Disable histogram */ 3708 spdk_bdev_histogram_enable(bdev, histogram_status_cb, NULL, false); 3709 poll_threads(); 3710 CU_ASSERT(g_status == 0); 3711 CU_ASSERT(bdev->internal.histogram_enabled == false); 3712 3713 /* Try to run histogram commands on disabled bdev */ 3714 spdk_bdev_histogram_get(bdev, histogram, histogram_data_cb, NULL); 3715 poll_threads(); 3716 CU_ASSERT(g_status == -EFAULT); 3717 3718 spdk_bdev_channel_get_histogram(ch, histogram_channel_data_cb, NULL); 3719 CU_ASSERT(g_status == -EFAULT); 3720 3721 spdk_histogram_data_free(histogram); 3722 
spdk_put_io_channel(ch); 3723 spdk_bdev_close(desc); 3724 free_bdev(bdev); 3725 ut_fini_bdev(); 3726 } 3727 3728 static void 3729 _bdev_compare(bool emulated) 3730 { 3731 struct spdk_bdev *bdev; 3732 struct spdk_bdev_desc *desc = NULL; 3733 struct spdk_io_channel *ioch; 3734 struct ut_expected_io *expected_io; 3735 uint64_t offset, num_blocks; 3736 uint32_t num_completed; 3737 char aa_buf[512]; 3738 char bb_buf[512]; 3739 struct iovec compare_iov; 3740 uint8_t expected_io_type; 3741 int rc; 3742 3743 if (emulated) { 3744 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3745 } else { 3746 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3747 } 3748 3749 memset(aa_buf, 0xaa, sizeof(aa_buf)); 3750 memset(bb_buf, 0xbb, sizeof(bb_buf)); 3751 3752 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3753 3754 ut_init_bdev(NULL); 3755 fn_table.submit_request = stub_submit_request_get_buf; 3756 bdev = allocate_bdev("bdev"); 3757 3758 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3759 CU_ASSERT_EQUAL(rc, 0); 3760 SPDK_CU_ASSERT_FATAL(desc != NULL); 3761 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3762 ioch = spdk_bdev_get_io_channel(desc); 3763 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3764 3765 fn_table.submit_request = stub_submit_request_get_buf; 3766 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3767 3768 offset = 50; 3769 num_blocks = 1; 3770 compare_iov.iov_base = aa_buf; 3771 compare_iov.iov_len = sizeof(aa_buf); 3772 3773 /* 1. successful comparev */ 3774 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3775 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3776 3777 g_io_done = false; 3778 g_compare_read_buf = aa_buf; 3779 g_compare_read_buf_len = sizeof(aa_buf); 3780 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3781 CU_ASSERT_EQUAL(rc, 0); 3782 num_completed = stub_complete_io(1); 3783 CU_ASSERT_EQUAL(num_completed, 1); 3784 CU_ASSERT(g_io_done == true); 3785 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3786 3787 /* 2. miscompare comparev */ 3788 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3789 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3790 3791 g_io_done = false; 3792 g_compare_read_buf = bb_buf; 3793 g_compare_read_buf_len = sizeof(bb_buf); 3794 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3795 CU_ASSERT_EQUAL(rc, 0); 3796 num_completed = stub_complete_io(1); 3797 CU_ASSERT_EQUAL(num_completed, 1); 3798 CU_ASSERT(g_io_done == true); 3799 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3800 3801 /* 3. successful compare */ 3802 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3803 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3804 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3805 3806 g_io_done = false; 3807 g_compare_read_buf = aa_buf; 3808 g_compare_read_buf_len = sizeof(aa_buf); 3809 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3810 CU_ASSERT_EQUAL(rc, 0); 3811 num_completed = stub_complete_io(1); 3812 CU_ASSERT_EQUAL(num_completed, 1); 3813 CU_ASSERT(g_io_done == true); 3814 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3815 3816 /* 4. 
miscompare compare */ 3817 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3818 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3819 3820 g_io_done = false; 3821 g_compare_read_buf = bb_buf; 3822 g_compare_read_buf_len = sizeof(bb_buf); 3823 rc = spdk_bdev_compare_blocks(desc, ioch, aa_buf, offset, num_blocks, io_done, NULL); 3824 CU_ASSERT_EQUAL(rc, 0); 3825 num_completed = stub_complete_io(1); 3826 CU_ASSERT_EQUAL(num_completed, 1); 3827 CU_ASSERT(g_io_done == true); 3828 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3829 3830 spdk_put_io_channel(ioch); 3831 spdk_bdev_close(desc); 3832 free_bdev(bdev); 3833 fn_table.submit_request = stub_submit_request; 3834 ut_fini_bdev(); 3835 3836 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3837 3838 g_compare_read_buf = NULL; 3839 } 3840 3841 static void 3842 _bdev_compare_with_md(bool emulated) 3843 { 3844 struct spdk_bdev *bdev; 3845 struct spdk_bdev_desc *desc = NULL; 3846 struct spdk_io_channel *ioch; 3847 struct ut_expected_io *expected_io; 3848 uint64_t offset, num_blocks; 3849 uint32_t num_completed; 3850 char buf[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3851 char buf_interleaved_miscompare[1024 + 16 /* 2 * blocklen + 2 * mdlen */]; 3852 char buf_miscompare[1024 /* 2 * blocklen */]; 3853 char md_buf[16]; 3854 char md_buf_miscompare[16]; 3855 struct iovec compare_iov; 3856 uint8_t expected_io_type; 3857 int rc; 3858 3859 if (emulated) { 3860 expected_io_type = SPDK_BDEV_IO_TYPE_READ; 3861 } else { 3862 expected_io_type = SPDK_BDEV_IO_TYPE_COMPARE; 3863 } 3864 3865 memset(buf, 0xaa, sizeof(buf)); 3866 memset(buf_interleaved_miscompare, 0xaa, sizeof(buf_interleaved_miscompare)); 3867 /* make last md different */ 3868 memset(buf_interleaved_miscompare + 1024 + 8, 0xbb, 8); 3869 memset(buf_miscompare, 0xbb, sizeof(buf_miscompare)); 3870 memset(md_buf, 0xaa, 16); 3871 memset(md_buf_miscompare, 0xbb, 16); 3872 3873 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = !emulated; 3874 3875 ut_init_bdev(NULL); 3876 fn_table.submit_request = stub_submit_request_get_buf; 3877 bdev = allocate_bdev("bdev"); 3878 3879 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 3880 CU_ASSERT_EQUAL(rc, 0); 3881 SPDK_CU_ASSERT_FATAL(desc != NULL); 3882 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 3883 ioch = spdk_bdev_get_io_channel(desc); 3884 SPDK_CU_ASSERT_FATAL(ioch != NULL); 3885 3886 fn_table.submit_request = stub_submit_request_get_buf; 3887 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3888 3889 offset = 50; 3890 num_blocks = 2; 3891 3892 /* interleaved md & data */ 3893 bdev->md_interleave = true; 3894 bdev->md_len = 8; 3895 bdev->blocklen = 512 + 8; 3896 compare_iov.iov_base = buf; 3897 compare_iov.iov_len = sizeof(buf); 3898 3899 /* 1. successful compare with md interleaved */ 3900 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3901 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3902 3903 g_io_done = false; 3904 g_compare_read_buf = buf; 3905 g_compare_read_buf_len = sizeof(buf); 3906 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3907 CU_ASSERT_EQUAL(rc, 0); 3908 num_completed = stub_complete_io(1); 3909 CU_ASSERT_EQUAL(num_completed, 1); 3910 CU_ASSERT(g_io_done == true); 3911 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3912 3913 /* 2. 
miscompare with md interleaved */ 3914 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3915 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3916 3917 g_io_done = false; 3918 g_compare_read_buf = buf_interleaved_miscompare; 3919 g_compare_read_buf_len = sizeof(buf_interleaved_miscompare); 3920 rc = spdk_bdev_comparev_blocks(desc, ioch, &compare_iov, 1, offset, num_blocks, io_done, NULL); 3921 CU_ASSERT_EQUAL(rc, 0); 3922 num_completed = stub_complete_io(1); 3923 CU_ASSERT_EQUAL(num_completed, 1); 3924 CU_ASSERT(g_io_done == true); 3925 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3926 3927 /* Separate data & md buffers */ 3928 bdev->md_interleave = false; 3929 bdev->blocklen = 512; 3930 compare_iov.iov_base = buf; 3931 compare_iov.iov_len = 1024; 3932 3933 /* 3. successful compare with md separated */ 3934 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3935 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3936 3937 g_io_done = false; 3938 g_compare_read_buf = buf; 3939 g_compare_read_buf_len = 1024; 3940 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 3941 g_compare_md_buf = md_buf; 3942 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3943 offset, num_blocks, io_done, NULL); 3944 CU_ASSERT_EQUAL(rc, 0); 3945 num_completed = stub_complete_io(1); 3946 CU_ASSERT_EQUAL(num_completed, 1); 3947 CU_ASSERT(g_io_done == true); 3948 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 3949 3950 /* 4. miscompare with md separated where md buf is different */ 3951 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3952 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3953 3954 g_io_done = false; 3955 g_compare_read_buf = buf; 3956 g_compare_read_buf_len = 1024; 3957 g_compare_md_buf = md_buf_miscompare; 3958 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3959 offset, num_blocks, io_done, NULL); 3960 CU_ASSERT_EQUAL(rc, 0); 3961 num_completed = stub_complete_io(1); 3962 CU_ASSERT_EQUAL(num_completed, 1); 3963 CU_ASSERT(g_io_done == true); 3964 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3965 3966 /* 5. 
miscompare with md separated where buf is different */ 3967 expected_io = ut_alloc_expected_io(expected_io_type, offset, num_blocks, 0); 3968 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 3969 3970 g_io_done = false; 3971 g_compare_read_buf = buf_miscompare; 3972 g_compare_read_buf_len = sizeof(buf_miscompare); 3973 g_compare_md_buf = md_buf; 3974 rc = spdk_bdev_comparev_blocks_with_md(desc, ioch, &compare_iov, 1, md_buf, 3975 offset, num_blocks, io_done, NULL); 3976 CU_ASSERT_EQUAL(rc, 0); 3977 num_completed = stub_complete_io(1); 3978 CU_ASSERT_EQUAL(num_completed, 1); 3979 CU_ASSERT(g_io_done == true); 3980 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 3981 3982 bdev->md_len = 0; 3983 g_compare_md_buf = NULL; 3984 3985 spdk_put_io_channel(ioch); 3986 spdk_bdev_close(desc); 3987 free_bdev(bdev); 3988 fn_table.submit_request = stub_submit_request; 3989 ut_fini_bdev(); 3990 3991 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 3992 3993 g_compare_read_buf = NULL; 3994 } 3995 3996 static void 3997 bdev_compare(void) 3998 { 3999 _bdev_compare(false); 4000 _bdev_compare_with_md(false); 4001 } 4002 4003 static void 4004 bdev_compare_emulated(void) 4005 { 4006 _bdev_compare(true); 4007 _bdev_compare_with_md(true); 4008 } 4009 4010 static void 4011 bdev_compare_and_write(void) 4012 { 4013 struct spdk_bdev *bdev; 4014 struct spdk_bdev_desc *desc = NULL; 4015 struct spdk_io_channel *ioch; 4016 struct ut_expected_io *expected_io; 4017 uint64_t offset, num_blocks; 4018 uint32_t num_completed; 4019 char aa_buf[512]; 4020 char bb_buf[512]; 4021 char cc_buf[512]; 4022 char write_buf[512]; 4023 struct iovec compare_iov; 4024 struct iovec write_iov; 4025 int rc; 4026 4027 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4028 memset(bb_buf, 0xbb, sizeof(bb_buf)); 4029 memset(cc_buf, 0xcc, sizeof(cc_buf)); 4030 4031 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = false; 4032 4033 ut_init_bdev(NULL); 4034 fn_table.submit_request = stub_submit_request_get_buf; 4035 bdev = allocate_bdev("bdev"); 4036 4037 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4038 CU_ASSERT_EQUAL(rc, 0); 4039 SPDK_CU_ASSERT_FATAL(desc != NULL); 4040 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4041 ioch = spdk_bdev_get_io_channel(desc); 4042 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4043 4044 fn_table.submit_request = stub_submit_request_get_buf; 4045 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4046 4047 offset = 50; 4048 num_blocks = 1; 4049 compare_iov.iov_base = aa_buf; 4050 compare_iov.iov_len = sizeof(aa_buf); 4051 write_iov.iov_base = bb_buf; 4052 write_iov.iov_len = sizeof(bb_buf); 4053 4054 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4055 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4056 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset, num_blocks, 0); 4057 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4058 4059 g_io_done = false; 4060 g_compare_read_buf = aa_buf; 4061 g_compare_read_buf_len = sizeof(aa_buf); 4062 memset(write_buf, 0, sizeof(write_buf)); 4063 g_compare_write_buf = write_buf; 4064 g_compare_write_buf_len = sizeof(write_buf); 4065 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4066 offset, num_blocks, io_done, NULL); 4067 /* Trigger range locking */ 4068 poll_threads(); 4069 CU_ASSERT_EQUAL(rc, 0); 4070 num_completed = stub_complete_io(1); 4071 CU_ASSERT_EQUAL(num_completed, 1); 4072 CU_ASSERT(g_io_done == 
false); 4073 num_completed = stub_complete_io(1); 4074 /* Trigger range unlocking */ 4075 poll_threads(); 4076 CU_ASSERT_EQUAL(num_completed, 1); 4077 CU_ASSERT(g_io_done == true); 4078 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4079 CU_ASSERT(memcmp(write_buf, bb_buf, sizeof(write_buf)) == 0); 4080 4081 /* Test miscompare */ 4082 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, offset, num_blocks, 0); 4083 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4084 4085 g_io_done = false; 4086 g_compare_read_buf = cc_buf; 4087 g_compare_read_buf_len = sizeof(cc_buf); 4088 memset(write_buf, 0, sizeof(write_buf)); 4089 g_compare_write_buf = write_buf; 4090 g_compare_write_buf_len = sizeof(write_buf); 4091 rc = spdk_bdev_comparev_and_writev_blocks(desc, ioch, &compare_iov, 1, &write_iov, 1, 4092 offset, num_blocks, io_done, NULL); 4093 /* Trigger range locking */ 4094 poll_threads(); 4095 CU_ASSERT_EQUAL(rc, 0); 4096 num_completed = stub_complete_io(1); 4097 /* Trigger range unlocking earlier because we expect an error here */ 4098 poll_threads(); 4099 CU_ASSERT_EQUAL(num_completed, 1); 4100 CU_ASSERT(g_io_done == true); 4101 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_MISCOMPARE); 4102 num_completed = stub_complete_io(1); 4103 CU_ASSERT_EQUAL(num_completed, 0); 4104 4105 spdk_put_io_channel(ioch); 4106 spdk_bdev_close(desc); 4107 free_bdev(bdev); 4108 fn_table.submit_request = stub_submit_request; 4109 ut_fini_bdev(); 4110 4111 g_io_types_supported[SPDK_BDEV_IO_TYPE_COMPARE] = true; 4112 4113 g_compare_read_buf = NULL; 4114 g_compare_write_buf = NULL; 4115 } 4116 4117 static void 4118 bdev_write_zeroes(void) 4119 { 4120 struct spdk_bdev *bdev; 4121 struct spdk_bdev_desc *desc = NULL; 4122 struct spdk_io_channel *ioch; 4123 struct ut_expected_io *expected_io; 4124 uint64_t offset, num_io_blocks, num_blocks; 4125 uint32_t num_completed, num_requests; 4126 int rc; 4127 4128 ut_init_bdev(NULL); 4129 bdev = allocate_bdev("bdev"); 4130 4131 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4132 CU_ASSERT_EQUAL(rc, 0); 4133 SPDK_CU_ASSERT_FATAL(desc != NULL); 4134 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4135 ioch = spdk_bdev_get_io_channel(desc); 4136 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4137 4138 fn_table.submit_request = stub_submit_request; 4139 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4140 4141 /* First test that if the bdev supports write_zeroes, the request won't be split */ 4142 bdev->md_len = 0; 4143 bdev->blocklen = 4096; 4144 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4145 4146 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 4147 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4148 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4149 CU_ASSERT_EQUAL(rc, 0); 4150 num_completed = stub_complete_io(1); 4151 CU_ASSERT_EQUAL(num_completed, 1); 4152 4153 /* Check that if write zeroes is not supported it'll be replaced by regular writes */ 4154 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, false); 4155 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4156 num_io_blocks = ZERO_BUFFER_SIZE / bdev->blocklen; 4157 num_requests = 2; 4158 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * num_requests; 4159 4160 for (offset = 0; offset < num_requests; ++offset) { 4161 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4162 offset * num_io_blocks, num_io_blocks, 0); 4163
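/* One plain write per ZERO_BUFFER_SIZE worth of blocks is expected in place of the unsupported write_zeroes. */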
TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4164 } 4165 4166 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4167 CU_ASSERT_EQUAL(rc, 0); 4168 num_completed = stub_complete_io(num_requests); 4169 CU_ASSERT_EQUAL(num_completed, num_requests); 4170 4171 /* Check that the splitting is correct if bdev has interleaved metadata */ 4172 bdev->md_interleave = true; 4173 bdev->md_len = 64; 4174 bdev->blocklen = 4096 + 64; 4175 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4176 num_blocks = (ZERO_BUFFER_SIZE / bdev->blocklen) * 2; 4177 4178 num_requests = offset = 0; 4179 while (offset < num_blocks) { 4180 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / bdev->blocklen, num_blocks - offset); 4181 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4182 offset, num_io_blocks, 0); 4183 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4184 offset += num_io_blocks; 4185 num_requests++; 4186 } 4187 4188 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4189 CU_ASSERT_EQUAL(rc, 0); 4190 num_completed = stub_complete_io(num_requests); 4191 CU_ASSERT_EQUAL(num_completed, num_requests); 4192 num_completed = stub_complete_io(num_requests); 4193 CU_ASSERT_EQUAL(num_completed, 0); 4194 4195 /* Check the same for a separate metadata buffer */ 4196 bdev->md_interleave = false; 4197 bdev->md_len = 64; 4198 bdev->blocklen = 4096; 4199 bdev->max_write_zeroes = bdev_get_max_write(bdev, ZERO_BUFFER_SIZE); 4200 4201 num_requests = offset = 0; 4202 while (offset < num_blocks) { 4203 num_io_blocks = spdk_min(ZERO_BUFFER_SIZE / (bdev->blocklen + bdev->md_len), num_blocks - offset); 4204 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 4205 offset, num_io_blocks, 0); 4206 expected_io->md_buf = (char *)g_bdev_mgr.zero_buffer + num_io_blocks * bdev->blocklen; 4207 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4208 offset += num_io_blocks; 4209 num_requests++; 4210 } 4211 4212 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 4213 CU_ASSERT_EQUAL(rc, 0); 4214 num_completed = stub_complete_io(num_requests); 4215 CU_ASSERT_EQUAL(num_completed, num_requests); 4216 4217 ut_enable_io_type(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, true); 4218 spdk_put_io_channel(ioch); 4219 spdk_bdev_close(desc); 4220 free_bdev(bdev); 4221 ut_fini_bdev(); 4222 } 4223 4224 static void 4225 bdev_zcopy_write(void) 4226 { 4227 struct spdk_bdev *bdev; 4228 struct spdk_bdev_desc *desc = NULL; 4229 struct spdk_io_channel *ioch; 4230 struct ut_expected_io *expected_io; 4231 uint64_t offset, num_blocks; 4232 uint32_t num_completed; 4233 char aa_buf[512]; 4234 struct iovec iov; 4235 int rc; 4236 const bool populate = false; 4237 const bool commit = true; 4238 4239 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4240 4241 ut_init_bdev(NULL); 4242 bdev = allocate_bdev("bdev"); 4243 4244 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4245 CU_ASSERT_EQUAL(rc, 0); 4246 SPDK_CU_ASSERT_FATAL(desc != NULL); 4247 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4248 ioch = spdk_bdev_get_io_channel(desc); 4249 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4250 4251 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4252 4253 offset = 50; 4254 num_blocks = 1; 4255 iov.iov_base = NULL; 4256 iov.iov_len = 0; 4257 4258 g_zcopy_read_buf = (void *) 0x1122334455667788UL; 4259 g_zcopy_read_buf_len = (uint32_t) -1; 4260 /* Do a zcopy start for a write (populate=false) */ 4261 expected_io =
ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4262 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4263 g_io_done = false; 4264 g_zcopy_write_buf = aa_buf; 4265 g_zcopy_write_buf_len = sizeof(aa_buf); 4266 g_zcopy_bdev_io = NULL; 4267 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4268 CU_ASSERT_EQUAL(rc, 0); 4269 num_completed = stub_complete_io(1); 4270 CU_ASSERT_EQUAL(num_completed, 1); 4271 CU_ASSERT(g_io_done == true); 4272 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4273 /* Check that the iov has been set up */ 4274 CU_ASSERT(iov.iov_base == g_zcopy_write_buf); 4275 CU_ASSERT(iov.iov_len == g_zcopy_write_buf_len); 4276 /* Check that the bdev_io has been saved */ 4277 CU_ASSERT(g_zcopy_bdev_io != NULL); 4278 /* Now do the zcopy end for a write (commit=true) */ 4279 g_io_done = false; 4280 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4281 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4282 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4283 CU_ASSERT_EQUAL(rc, 0); 4284 num_completed = stub_complete_io(1); 4285 CU_ASSERT_EQUAL(num_completed, 1); 4286 CU_ASSERT(g_io_done == true); 4287 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4288 /* Check that the g_zcopy variables are reset by io_done */ 4289 CU_ASSERT(g_zcopy_write_buf == NULL); 4290 CU_ASSERT(g_zcopy_write_buf_len == 0); 4291 /* Check that io_done has freed the g_zcopy_bdev_io */ 4292 CU_ASSERT(g_zcopy_bdev_io == NULL); 4293 4294 /* Check that the zcopy read buffer has not been touched, which 4295 * ensures that the correct buffers were used. 4296 */ 4297 CU_ASSERT(g_zcopy_read_buf == (void *) 0x1122334455667788UL); 4298 CU_ASSERT(g_zcopy_read_buf_len == (uint32_t) -1); 4299 4300 spdk_put_io_channel(ioch); 4301 spdk_bdev_close(desc); 4302 free_bdev(bdev); 4303 ut_fini_bdev(); 4304 } 4305 4306 static void 4307 bdev_zcopy_read(void) 4308 { 4309 struct spdk_bdev *bdev; 4310 struct spdk_bdev_desc *desc = NULL; 4311 struct spdk_io_channel *ioch; 4312 struct ut_expected_io *expected_io; 4313 uint64_t offset, num_blocks; 4314 uint32_t num_completed; 4315 char aa_buf[512]; 4316 struct iovec iov; 4317 int rc; 4318 const bool populate = true; 4319 const bool commit = false; 4320 4321 memset(aa_buf, 0xaa, sizeof(aa_buf)); 4322 4323 ut_init_bdev(NULL); 4324 bdev = allocate_bdev("bdev"); 4325 4326 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 4327 CU_ASSERT_EQUAL(rc, 0); 4328 SPDK_CU_ASSERT_FATAL(desc != NULL); 4329 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4330 ioch = spdk_bdev_get_io_channel(desc); 4331 SPDK_CU_ASSERT_FATAL(ioch != NULL); 4332 4333 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 4334 4335 offset = 50; 4336 num_blocks = 1; 4337 iov.iov_base = NULL; 4338 iov.iov_len = 0; 4339 4340 g_zcopy_write_buf = (void *) 0x1122334455667788UL; 4341 g_zcopy_write_buf_len = (uint32_t) -1; 4342 4343 /* Do a zcopy start for a read (populate=true) */ 4344 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4345 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4346 g_io_done = false; 4347 g_zcopy_read_buf = aa_buf; 4348 g_zcopy_read_buf_len = sizeof(aa_buf); 4349 g_zcopy_bdev_io = NULL; 4350 rc = spdk_bdev_zcopy_start(desc, ioch, &iov, 1, offset, num_blocks, populate, io_done, NULL); 4351 CU_ASSERT_EQUAL(rc, 0); 4352 num_completed = stub_complete_io(1); 4353 CU_ASSERT_EQUAL(num_completed,
1); 4354 CU_ASSERT(g_io_done == true); 4355 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4356 /* Check that the iov has been set up */ 4357 CU_ASSERT(iov.iov_base == g_zcopy_read_buf); 4358 CU_ASSERT(iov.iov_len == g_zcopy_read_buf_len); 4359 /* Check that the bdev_io has been saved */ 4360 CU_ASSERT(g_zcopy_bdev_io != NULL); 4361 4362 /* Now do the zcopy end for a read (commit=false) */ 4363 g_io_done = false; 4364 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_ZCOPY, offset, num_blocks, 0); 4365 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 4366 rc = spdk_bdev_zcopy_end(g_zcopy_bdev_io, commit, io_done, NULL); 4367 CU_ASSERT_EQUAL(rc, 0); 4368 num_completed = stub_complete_io(1); 4369 CU_ASSERT_EQUAL(num_completed, 1); 4370 CU_ASSERT(g_io_done == true); 4371 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 4372 /* Check that the g_zcopy variables are reset by io_done */ 4373 CU_ASSERT(g_zcopy_read_buf == NULL); 4374 CU_ASSERT(g_zcopy_read_buf_len == 0); 4375 /* Check that io_done has freed the g_zcopy_bdev_io */ 4376 CU_ASSERT(g_zcopy_bdev_io == NULL); 4377 4378 /* Check that the zcopy write buffer has not been touched, which 4379 * ensures that the correct buffers were used. 4380 */ 4381 CU_ASSERT(g_zcopy_write_buf == (void *) 0x1122334455667788UL); 4382 CU_ASSERT(g_zcopy_write_buf_len == (uint32_t) -1); 4383 4384 spdk_put_io_channel(ioch); 4385 spdk_bdev_close(desc); 4386 free_bdev(bdev); 4387 ut_fini_bdev(); 4388 } 4389 4390 static void 4391 bdev_open_while_hotremove(void) 4392 { 4393 struct spdk_bdev *bdev; 4394 struct spdk_bdev_desc *desc[2] = {}; 4395 int rc; 4396 4397 bdev = allocate_bdev("bdev"); 4398 4399 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[0]); 4400 CU_ASSERT(rc == 0); 4401 SPDK_CU_ASSERT_FATAL(desc[0] != NULL); 4402 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc[0])); 4403 4404 spdk_bdev_unregister(bdev, NULL, NULL); 4405 /* Bdev unregister is handled asynchronously. Poll thread to complete.
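* The descriptor opened before the unregister remains valid until it is
* closed, but any new open on the unregistered bdev must fail with -ENODEV.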
*/ 4406 poll_threads(); 4407 4408 rc = spdk_bdev_open_ext("bdev", false, bdev_ut_event_cb, NULL, &desc[1]); 4409 CU_ASSERT(rc == -ENODEV); 4410 SPDK_CU_ASSERT_FATAL(desc[1] == NULL); 4411 4412 spdk_bdev_close(desc[0]); 4413 free_bdev(bdev); 4414 } 4415 4416 static void 4417 bdev_close_while_hotremove(void) 4418 { 4419 struct spdk_bdev *bdev; 4420 struct spdk_bdev_desc *desc = NULL; 4421 int rc = 0; 4422 4423 bdev = allocate_bdev("bdev"); 4424 4425 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc, &desc); 4426 CU_ASSERT_EQUAL(rc, 0); 4427 SPDK_CU_ASSERT_FATAL(desc != NULL); 4428 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4429 4430 /* Simulate hot-unplug by unregistering bdev */ 4431 g_event_type1 = 0xFF; 4432 g_unregister_arg = NULL; 4433 g_unregister_rc = -1; 4434 spdk_bdev_unregister(bdev, bdev_unregister_cb, (void *)0x12345678); 4435 /* Close device while remove event is in flight */ 4436 spdk_bdev_close(desc); 4437 4438 /* Ensure that unregister callback is delayed */ 4439 CU_ASSERT_EQUAL(g_unregister_arg, NULL); 4440 CU_ASSERT_EQUAL(g_unregister_rc, -1); 4441 4442 poll_threads(); 4443 4444 /* Event callback shall not be issued because device was closed */ 4445 CU_ASSERT_EQUAL(g_event_type1, 0xFF); 4446 /* Unregister callback is issued */ 4447 CU_ASSERT_EQUAL(g_unregister_arg, (void *)0x12345678); 4448 CU_ASSERT_EQUAL(g_unregister_rc, 0); 4449 4450 free_bdev(bdev); 4451 } 4452 4453 static void 4454 bdev_open_ext_test(void) 4455 { 4456 struct spdk_bdev *bdev; 4457 struct spdk_bdev_desc *desc1 = NULL; 4458 struct spdk_bdev_desc *desc2 = NULL; 4459 int rc = 0; 4460 4461 bdev = allocate_bdev("bdev"); 4462 4463 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4464 CU_ASSERT_EQUAL(rc, -EINVAL); 4465 4466 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4467 CU_ASSERT_EQUAL(rc, 0); 4468 4469 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4470 CU_ASSERT_EQUAL(rc, 0); 4471 4472 g_event_type1 = 0xFF; 4473 g_event_type2 = 0xFF; 4474 4475 /* Simulate hot-unplug by unregistering bdev */ 4476 spdk_bdev_unregister(bdev, NULL, NULL); 4477 poll_threads(); 4478 4479 /* Check if correct events have been triggered in event callback fn */ 4480 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4481 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4482 4483 free_bdev(bdev); 4484 poll_threads(); 4485 } 4486 4487 static void 4488 bdev_open_ext_unregister(void) 4489 { 4490 struct spdk_bdev *bdev; 4491 struct spdk_bdev_desc *desc1 = NULL; 4492 struct spdk_bdev_desc *desc2 = NULL; 4493 struct spdk_bdev_desc *desc3 = NULL; 4494 struct spdk_bdev_desc *desc4 = NULL; 4495 int rc = 0; 4496 4497 bdev = allocate_bdev("bdev"); 4498 4499 rc = spdk_bdev_open_ext("bdev", true, NULL, NULL, &desc1); 4500 CU_ASSERT_EQUAL(rc, -EINVAL); 4501 4502 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb1, &desc1, &desc1); 4503 CU_ASSERT_EQUAL(rc, 0); 4504 4505 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb2, &desc2, &desc2); 4506 CU_ASSERT_EQUAL(rc, 0); 4507 4508 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb3, &desc3, &desc3); 4509 CU_ASSERT_EQUAL(rc, 0); 4510 4511 rc = spdk_bdev_open_ext("bdev", true, bdev_open_cb4, &desc4, &desc4); 4512 CU_ASSERT_EQUAL(rc, 0); 4513 4514 g_event_type1 = 0xFF; 4515 g_event_type2 = 0xFF; 4516 g_event_type3 = 0xFF; 4517 g_event_type4 = 0xFF; 4518 4519 g_unregister_arg = NULL; 4520 g_unregister_rc = -1; 4521 4522 /* Simulate hot-unplug by unregistering bdev */ 4523 spdk_bdev_unregister(bdev, bdev_unregister_cb, 
(void *)0x12345678); 4524 4525 /* 4526 * Unregister is handled asynchronously and the event callbacks 4527 * (i.e., the bdev_open_cbN above) will be called. 4528 * bdev_open_cb3 and bdev_open_cb4 intentionally do not close 4529 * desc3 and desc4, so the bdev is not closed. 4530 */ 4531 poll_threads(); 4532 4533 /* Check if correct events have been triggered in event callback fn */ 4534 CU_ASSERT_EQUAL(g_event_type1, SPDK_BDEV_EVENT_REMOVE); 4535 CU_ASSERT_EQUAL(g_event_type2, SPDK_BDEV_EVENT_REMOVE); 4536 CU_ASSERT_EQUAL(g_event_type3, SPDK_BDEV_EVENT_REMOVE); 4537 CU_ASSERT_EQUAL(g_event_type4, SPDK_BDEV_EVENT_REMOVE); 4538 4539 /* Check that unregister callback is delayed */ 4540 CU_ASSERT(g_unregister_arg == NULL); 4541 CU_ASSERT(g_unregister_rc == -1); 4542 4543 /* 4544 * Explicitly close desc3. As desc4 is still open, the 4545 * unregister callback is still delayed. 4546 */ 4547 spdk_bdev_close(desc3); 4548 CU_ASSERT(g_unregister_arg == NULL); 4549 CU_ASSERT(g_unregister_rc == -1); 4550 4551 /* 4552 * Explicitly close desc4 to trigger the ongoing bdev unregister 4553 * operation after the last desc is closed. 4554 */ 4555 spdk_bdev_close(desc4); 4556 4557 /* Poll the thread for the async unregister operation */ 4558 poll_threads(); 4559 4560 /* Check that unregister callback is executed */ 4561 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 4562 CU_ASSERT(g_unregister_rc == 0); 4563 4564 free_bdev(bdev); 4565 poll_threads(); 4566 } 4567 4568 struct timeout_io_cb_arg { 4569 struct iovec iov; 4570 uint8_t type; 4571 }; 4572 4573 static int 4574 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch) 4575 { 4576 struct spdk_bdev_io *bdev_io; 4577 int n = 0; 4578 4579 if (!ch) { 4580 return -1; 4581 } 4582 4583 TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) { 4584 n++; 4585 } 4586 4587 return n; 4588 } 4589 4590 static void 4591 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io) 4592 { 4593 struct timeout_io_cb_arg *ctx = cb_arg; 4594 4595 ctx->type = bdev_io->type; 4596 ctx->iov.iov_base = bdev_io->iov.iov_base; 4597 ctx->iov.iov_len = bdev_io->iov.iov_len; 4598 } 4599 4600 static void 4601 bdev_set_io_timeout(void) 4602 { 4603 struct spdk_bdev *bdev; 4604 struct spdk_bdev_desc *desc = NULL; 4605 struct spdk_io_channel *io_ch = NULL; 4606 struct spdk_bdev_channel *bdev_ch = NULL; 4607 struct timeout_io_cb_arg cb_arg; 4608 4609 ut_init_bdev(NULL); 4610 bdev = allocate_bdev("bdev"); 4611 4612 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4613 SPDK_CU_ASSERT_FATAL(desc != NULL); 4614 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4615 4616 io_ch = spdk_bdev_get_io_channel(desc); 4617 CU_ASSERT(io_ch != NULL); 4618 4619 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4620 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4621 4622 /* This is part 1.
4623 * We will check the bdev_ch->io_submitted list 4624 * to make sure that it links IOs, and only the user-submitted IOs 4625 */ 4626 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4627 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4628 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4629 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4630 stub_complete_io(1); 4631 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4632 stub_complete_io(1); 4633 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4634 4635 /* Split IO */ 4636 bdev->optimal_io_boundary = 16; 4637 bdev->split_on_optimal_io_boundary = true; 4638 4639 /* Now test that a single-vector command is split correctly. 4640 * Offset 14, length 8, payload 0xF000 4641 * Child - Offset 14, length 2, payload 0xF000 4642 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 4643 * 4644 * Set up the expected values before calling spdk_bdev_read_blocks 4645 */ 4646 CU_ASSERT(spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4647 /* We count all submitted IOs including IOs that are generated by splitting. */ 4648 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 3); 4649 stub_complete_io(1); 4650 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4651 stub_complete_io(1); 4652 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4653 4654 /* Also include the reset IO */ 4655 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4656 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4657 poll_threads(); 4658 stub_complete_io(1); 4659 poll_threads(); 4660 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4661 4662 /* This is part 2. 4663 * Test the desc timeout poller registration. 4664 */ 4665 4666 /* Successfully set the timeout */ 4667 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4668 CU_ASSERT(desc->io_timeout_poller != NULL); 4669 CU_ASSERT(desc->timeout_in_sec == 30); 4670 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4671 CU_ASSERT(desc->cb_arg == &cb_arg); 4672 4673 /* Change the timeout limit */ 4674 CU_ASSERT(spdk_bdev_set_timeout(desc, 20, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4675 CU_ASSERT(desc->io_timeout_poller != NULL); 4676 CU_ASSERT(desc->timeout_in_sec == 20); 4677 CU_ASSERT(desc->cb_fn == bdev_channel_io_timeout_cb); 4678 CU_ASSERT(desc->cb_arg == &cb_arg); 4679 4680 /* Disable the timeout */ 4681 CU_ASSERT(spdk_bdev_set_timeout(desc, 0, NULL, NULL) == 0); 4682 CU_ASSERT(desc->io_timeout_poller == NULL); 4683 4684 /* This is part 3. 4685 * We will test catching a timed-out IO and check whether the IO is 4686 * the submitted one.
4687 */ 4688 memset(&cb_arg, 0, sizeof(cb_arg)); 4689 CU_ASSERT(spdk_bdev_set_timeout(desc, 30, bdev_channel_io_timeout_cb, &cb_arg) == 0); 4690 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0x1000, 0, 1, io_done, NULL) == 0); 4691 4692 /* Don't reach the limit */ 4693 spdk_delay_us(15 * spdk_get_ticks_hz()); 4694 poll_threads(); 4695 CU_ASSERT(cb_arg.type == 0); 4696 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4697 CU_ASSERT(cb_arg.iov.iov_len == 0); 4698 4699 /* 15 + 15 = 30: reach the limit */ 4700 spdk_delay_us(15 * spdk_get_ticks_hz()); 4701 poll_threads(); 4702 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4703 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000); 4704 CU_ASSERT(cb_arg.iov.iov_len == 1 * bdev->blocklen); 4705 stub_complete_io(1); 4706 4707 /* Use the same split IO as above and check the IO */ 4708 memset(&cb_arg, 0, sizeof(cb_arg)); 4709 CU_ASSERT(spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL) == 0); 4710 4711 /* The first child completes in time */ 4712 spdk_delay_us(15 * spdk_get_ticks_hz()); 4713 poll_threads(); 4714 stub_complete_io(1); 4715 CU_ASSERT(cb_arg.type == 0); 4716 CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0); 4717 CU_ASSERT(cb_arg.iov.iov_len == 0); 4718 4719 /* The second child reaches the limit */ 4720 spdk_delay_us(15 * spdk_get_ticks_hz()); 4721 poll_threads(); 4722 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE); 4723 CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000); 4724 CU_ASSERT(cb_arg.iov.iov_len == 8 * bdev->blocklen); 4725 stub_complete_io(1); 4726 4727 /* Also include the reset IO */ 4728 memset(&cb_arg, 0, sizeof(cb_arg)); 4729 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4730 spdk_delay_us(30 * spdk_get_ticks_hz()); 4731 poll_threads(); 4732 CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_RESET); 4733 stub_complete_io(1); 4734 poll_threads(); 4735 4736 spdk_put_io_channel(io_ch); 4737 spdk_bdev_close(desc); 4738 free_bdev(bdev); 4739 ut_fini_bdev(); 4740 } 4741 4742 static void 4743 bdev_set_qd_sampling(void) 4744 { 4745 struct spdk_bdev *bdev; 4746 struct spdk_bdev_desc *desc = NULL; 4747 struct spdk_io_channel *io_ch = NULL; 4748 struct spdk_bdev_channel *bdev_ch = NULL; 4749 struct timeout_io_cb_arg cb_arg; 4750 4751 ut_init_bdev(NULL); 4752 bdev = allocate_bdev("bdev"); 4753 4754 CU_ASSERT(spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc) == 0); 4755 SPDK_CU_ASSERT_FATAL(desc != NULL); 4756 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4757 4758 io_ch = spdk_bdev_get_io_channel(desc); 4759 CU_ASSERT(io_ch != NULL); 4760 4761 bdev_ch = spdk_io_channel_get_ctx(io_ch); 4762 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 4763 4764 /* This is part 1: 4765 * We will check the bdev_ch->io_submitted list 4766 * to make sure that it links only the user-submitted IOs 4767 */ 4768 CU_ASSERT(spdk_bdev_read(desc, io_ch, (void *)0x1000, 0, 4096, io_done, NULL) == 0); 4769 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4770 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4771 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 2); 4772 stub_complete_io(1); 4773 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4774 stub_complete_io(1); 4775 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 0); 4776 4777 /* This is part 2:
4778 * Test the bdev's QD poller registration 4779 */ 4780 /* 1st: Successfully set the qd sampling period */ 4781 spdk_bdev_set_qd_sampling_period(bdev, 10); 4782 CU_ASSERT(bdev->internal.new_period == 10); 4783 CU_ASSERT(bdev->internal.period == 10); 4784 CU_ASSERT(bdev->internal.qd_desc != NULL); 4785 poll_threads(); 4786 CU_ASSERT(bdev->internal.qd_poller != NULL); 4787 4788 /* 2nd: Change the qd sampling period */ 4789 spdk_bdev_set_qd_sampling_period(bdev, 20); 4790 CU_ASSERT(bdev->internal.new_period == 20); 4791 CU_ASSERT(bdev->internal.period == 10); 4792 CU_ASSERT(bdev->internal.qd_desc != NULL); 4793 poll_threads(); 4794 CU_ASSERT(bdev->internal.qd_poller != NULL); 4795 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4796 4797 /* 3rd: Change the qd sampling period and verify qd_poll_in_progress */ 4798 spdk_delay_us(20); 4799 poll_thread_times(0, 1); 4800 CU_ASSERT(bdev->internal.qd_poll_in_progress == true); 4801 spdk_bdev_set_qd_sampling_period(bdev, 30); 4802 CU_ASSERT(bdev->internal.new_period == 30); 4803 CU_ASSERT(bdev->internal.period == 20); 4804 poll_threads(); 4805 CU_ASSERT(bdev->internal.qd_poll_in_progress == false); 4806 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4807 4808 /* 4th: Disable the qd sampling period */ 4809 spdk_bdev_set_qd_sampling_period(bdev, 0); 4810 CU_ASSERT(bdev->internal.new_period == 0); 4811 CU_ASSERT(bdev->internal.period == 30); 4812 poll_threads(); 4813 CU_ASSERT(bdev->internal.qd_poller == NULL); 4814 CU_ASSERT(bdev->internal.period == bdev->internal.new_period); 4815 CU_ASSERT(bdev->internal.qd_desc == NULL); 4816 4817 /* This is part 3: 4818 * We will test that submitted IOs and resets work 4819 * properly with QD sampling. 4820 */ 4821 memset(&cb_arg, 0, sizeof(cb_arg)); 4822 spdk_bdev_set_qd_sampling_period(bdev, 1); 4823 poll_threads(); 4824 4825 CU_ASSERT(spdk_bdev_write(desc, io_ch, (void *)0x2000, 0, 4096, io_done, NULL) == 0); 4826 CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch) == 1); 4827 4828 /* Also include the reset IO */ 4829 memset(&cb_arg, 0, sizeof(cb_arg)); 4830 CU_ASSERT(spdk_bdev_reset(desc, io_ch, io_done, NULL) == 0); 4831 poll_threads(); 4832 4833 /* Close the desc */ 4834 spdk_put_io_channel(io_ch); 4835 spdk_bdev_close(desc); 4836 4837 /* Complete the submitted IO and the reset */ 4838 stub_complete_io(2); 4839 poll_threads(); 4840 4841 free_bdev(bdev); 4842 ut_fini_bdev(); 4843 } 4844 4845 static void 4846 lba_range_overlap(void) 4847 { 4848 struct lba_range r1, r2; 4849 4850 r1.offset = 100; 4851 r1.length = 50; 4852 4853 r2.offset = 0; 4854 r2.length = 1; 4855 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4856 4857 r2.offset = 0; 4858 r2.length = 100; 4859 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4860 4861 r2.offset = 0; 4862 r2.length = 110; 4863 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4864 4865 r2.offset = 100; 4866 r2.length = 10; 4867 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4868 4869 r2.offset = 110; 4870 r2.length = 20; 4871 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4872 4873 r2.offset = 140; 4874 r2.length = 150; 4875 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4876 4877 r2.offset = 130; 4878 r2.length = 200; 4879 CU_ASSERT(bdev_lba_range_overlapped(&r1, &r2)); 4880 4881 r2.offset = 150; 4882 r2.length = 100; 4883 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4884 4885 r2.offset = 110; 4886 r2.length = 0; 4887 CU_ASSERT(!bdev_lba_range_overlapped(&r1, &r2)); 4888 } 4889 4890 static bool g_lock_lba_range_done; 4891 static bool
g_unlock_lba_range_done; 4892 4893 static void 4894 lock_lba_range_done(struct lba_range *range, void *ctx, int status) 4895 { 4896 g_lock_lba_range_done = true; 4897 } 4898 4899 static void 4900 unlock_lba_range_done(struct lba_range *range, void *ctx, int status) 4901 { 4902 g_unlock_lba_range_done = true; 4903 } 4904 4905 static void 4906 lock_lba_range_check_ranges(void) 4907 { 4908 struct spdk_bdev *bdev; 4909 struct spdk_bdev_desc *desc = NULL; 4910 struct spdk_io_channel *io_ch; 4911 struct spdk_bdev_channel *channel; 4912 struct lba_range *range; 4913 int ctx1; 4914 int rc; 4915 4916 ut_init_bdev(NULL); 4917 bdev = allocate_bdev("bdev0"); 4918 4919 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4920 CU_ASSERT(rc == 0); 4921 CU_ASSERT(desc != NULL); 4922 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4923 io_ch = spdk_bdev_get_io_channel(desc); 4924 CU_ASSERT(io_ch != NULL); 4925 channel = spdk_io_channel_get_ctx(io_ch); 4926 4927 g_lock_lba_range_done = false; 4928 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4929 CU_ASSERT(rc == 0); 4930 poll_threads(); 4931 4932 CU_ASSERT(g_lock_lba_range_done == true); 4933 range = TAILQ_FIRST(&channel->locked_ranges); 4934 SPDK_CU_ASSERT_FATAL(range != NULL); 4935 CU_ASSERT(range->offset == 20); 4936 CU_ASSERT(range->length == 10); 4937 CU_ASSERT(range->owner_ch == channel); 4938 4939 /* Unlocks must exactly match a lock. */ 4940 g_unlock_lba_range_done = false; 4941 rc = bdev_unlock_lba_range(desc, io_ch, 20, 1, unlock_lba_range_done, &ctx1); 4942 CU_ASSERT(rc == -EINVAL); 4943 CU_ASSERT(g_unlock_lba_range_done == false); 4944 4945 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 4946 CU_ASSERT(rc == 0); 4947 spdk_delay_us(100); 4948 poll_threads(); 4949 4950 CU_ASSERT(g_unlock_lba_range_done == true); 4951 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 4952 4953 spdk_put_io_channel(io_ch); 4954 spdk_bdev_close(desc); 4955 free_bdev(bdev); 4956 ut_fini_bdev(); 4957 } 4958 4959 static void 4960 lock_lba_range_with_io_outstanding(void) 4961 { 4962 struct spdk_bdev *bdev; 4963 struct spdk_bdev_desc *desc = NULL; 4964 struct spdk_io_channel *io_ch; 4965 struct spdk_bdev_channel *channel; 4966 struct lba_range *range; 4967 char buf[4096]; 4968 int ctx1; 4969 int rc; 4970 4971 ut_init_bdev(NULL); 4972 bdev = allocate_bdev("bdev0"); 4973 4974 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 4975 CU_ASSERT(rc == 0); 4976 CU_ASSERT(desc != NULL); 4977 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 4978 io_ch = spdk_bdev_get_io_channel(desc); 4979 CU_ASSERT(io_ch != NULL); 4980 channel = spdk_io_channel_get_ctx(io_ch); 4981 4982 g_io_done = false; 4983 rc = spdk_bdev_read_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 4984 CU_ASSERT(rc == 0); 4985 4986 g_lock_lba_range_done = false; 4987 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 4988 CU_ASSERT(rc == 0); 4989 poll_threads(); 4990 4991 /* The lock should immediately become valid, since there are no outstanding 4992 * write I/O. 
4993 */ 4994 CU_ASSERT(g_io_done == false); 4995 CU_ASSERT(g_lock_lba_range_done == true); 4996 range = TAILQ_FIRST(&channel->locked_ranges); 4997 SPDK_CU_ASSERT_FATAL(range != NULL); 4998 CU_ASSERT(range->offset == 20); 4999 CU_ASSERT(range->length == 10); 5000 CU_ASSERT(range->owner_ch == channel); 5001 CU_ASSERT(range->locked_ctx == &ctx1); 5002 5003 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5004 CU_ASSERT(rc == 0); 5005 stub_complete_io(1); 5006 spdk_delay_us(100); 5007 poll_threads(); 5008 5009 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5010 5011 /* Now try again, but with a write I/O. */ 5012 g_io_done = false; 5013 rc = spdk_bdev_write_blocks(desc, io_ch, buf, 20, 1, io_done, &ctx1); 5014 CU_ASSERT(rc == 0); 5015 5016 g_lock_lba_range_done = false; 5017 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5018 CU_ASSERT(rc == 0); 5019 poll_threads(); 5020 5021 /* The lock should not be fully valid yet, since a write I/O is outstanding. 5022 * But note that the range should be on the channel's locked_list, to make sure no 5023 * new write I/O are started. 5024 */ 5025 CU_ASSERT(g_io_done == false); 5026 CU_ASSERT(g_lock_lba_range_done == false); 5027 range = TAILQ_FIRST(&channel->locked_ranges); 5028 SPDK_CU_ASSERT_FATAL(range != NULL); 5029 CU_ASSERT(range->offset == 20); 5030 CU_ASSERT(range->length == 10); 5031 5032 /* Complete the write I/O. This should make the lock valid (checked by confirming 5033 * our callback was invoked). 5034 */ 5035 stub_complete_io(1); 5036 spdk_delay_us(100); 5037 poll_threads(); 5038 CU_ASSERT(g_io_done == true); 5039 CU_ASSERT(g_lock_lba_range_done == true); 5040 5041 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5042 CU_ASSERT(rc == 0); 5043 poll_threads(); 5044 5045 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5046 5047 spdk_put_io_channel(io_ch); 5048 spdk_bdev_close(desc); 5049 free_bdev(bdev); 5050 ut_fini_bdev(); 5051 } 5052 5053 static void 5054 lock_lba_range_overlapped(void) 5055 { 5056 struct spdk_bdev *bdev; 5057 struct spdk_bdev_desc *desc = NULL; 5058 struct spdk_io_channel *io_ch; 5059 struct spdk_bdev_channel *channel; 5060 struct lba_range *range; 5061 int ctx1; 5062 int rc; 5063 5064 ut_init_bdev(NULL); 5065 bdev = allocate_bdev("bdev0"); 5066 5067 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5068 CU_ASSERT(rc == 0); 5069 CU_ASSERT(desc != NULL); 5070 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5071 io_ch = spdk_bdev_get_io_channel(desc); 5072 CU_ASSERT(io_ch != NULL); 5073 channel = spdk_io_channel_get_ctx(io_ch); 5074 5075 /* Lock range 20-29. */ 5076 g_lock_lba_range_done = false; 5077 rc = bdev_lock_lba_range(desc, io_ch, 20, 10, lock_lba_range_done, &ctx1); 5078 CU_ASSERT(rc == 0); 5079 poll_threads(); 5080 5081 CU_ASSERT(g_lock_lba_range_done == true); 5082 range = TAILQ_FIRST(&channel->locked_ranges); 5083 SPDK_CU_ASSERT_FATAL(range != NULL); 5084 CU_ASSERT(range->offset == 20); 5085 CU_ASSERT(range->length == 10); 5086 5087 /* Try to lock range 25-39. It should not lock immediately, since it overlaps with 5088 * 20-29. 
5089 */ 5090 g_lock_lba_range_done = false; 5091 rc = bdev_lock_lba_range(desc, io_ch, 25, 15, lock_lba_range_done, &ctx1); 5092 CU_ASSERT(rc == 0); 5093 poll_threads(); 5094 5095 CU_ASSERT(g_lock_lba_range_done == false); 5096 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5097 SPDK_CU_ASSERT_FATAL(range != NULL); 5098 CU_ASSERT(range->offset == 25); 5099 CU_ASSERT(range->length == 15); 5100 5101 /* Unlock 20-29. This should result in range 25-39 now getting locked since it 5102 * no longer overlaps with an active lock. 5103 */ 5104 g_unlock_lba_range_done = false; 5105 rc = bdev_unlock_lba_range(desc, io_ch, 20, 10, unlock_lba_range_done, &ctx1); 5106 CU_ASSERT(rc == 0); 5107 poll_threads(); 5108 5109 CU_ASSERT(g_unlock_lba_range_done == true); 5110 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5111 range = TAILQ_FIRST(&channel->locked_ranges); 5112 SPDK_CU_ASSERT_FATAL(range != NULL); 5113 CU_ASSERT(range->offset == 25); 5114 CU_ASSERT(range->length == 15); 5115 5116 /* Lock 40-59. This should immediately lock since it does not overlap with the 5117 * currently active 25-39 lock. 5118 */ 5119 g_lock_lba_range_done = false; 5120 rc = bdev_lock_lba_range(desc, io_ch, 40, 20, lock_lba_range_done, &ctx1); 5121 CU_ASSERT(rc == 0); 5122 poll_threads(); 5123 5124 CU_ASSERT(g_lock_lba_range_done == true); 5125 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5126 SPDK_CU_ASSERT_FATAL(range != NULL); 5127 range = TAILQ_NEXT(range, tailq); 5128 SPDK_CU_ASSERT_FATAL(range != NULL); 5129 CU_ASSERT(range->offset == 40); 5130 CU_ASSERT(range->length == 20); 5131 5132 /* Try to lock 35-44. Note that this overlaps with both 25-39 and 40-59. */ 5133 g_lock_lba_range_done = false; 5134 rc = bdev_lock_lba_range(desc, io_ch, 35, 10, lock_lba_range_done, &ctx1); 5135 CU_ASSERT(rc == 0); 5136 poll_threads(); 5137 5138 CU_ASSERT(g_lock_lba_range_done == false); 5139 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5140 SPDK_CU_ASSERT_FATAL(range != NULL); 5141 CU_ASSERT(range->offset == 35); 5142 CU_ASSERT(range->length == 10); 5143 5144 /* Unlock 25-39. Make sure that 35-44 is still in the pending list, since 5145 * the 40-59 lock is still active. 5146 */ 5147 g_unlock_lba_range_done = false; 5148 rc = bdev_unlock_lba_range(desc, io_ch, 25, 15, unlock_lba_range_done, &ctx1); 5149 CU_ASSERT(rc == 0); 5150 poll_threads(); 5151 5152 CU_ASSERT(g_unlock_lba_range_done == true); 5153 CU_ASSERT(g_lock_lba_range_done == false); 5154 range = TAILQ_FIRST(&bdev->internal.pending_locked_ranges); 5155 SPDK_CU_ASSERT_FATAL(range != NULL); 5156 CU_ASSERT(range->offset == 35); 5157 CU_ASSERT(range->length == 10); 5158 5159 /* Unlock 40-59. This should result in 35-44 now getting locked, since there are 5160 * no longer any active overlapping locks. 5161 */ 5162 g_unlock_lba_range_done = false; 5163 rc = bdev_unlock_lba_range(desc, io_ch, 40, 20, unlock_lba_range_done, &ctx1); 5164 CU_ASSERT(rc == 0); 5165 poll_threads(); 5166 5167 CU_ASSERT(g_unlock_lba_range_done == true); 5168 CU_ASSERT(g_lock_lba_range_done == true); 5169 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.pending_locked_ranges)); 5170 range = TAILQ_FIRST(&bdev->internal.locked_ranges); 5171 SPDK_CU_ASSERT_FATAL(range != NULL); 5172 CU_ASSERT(range->offset == 35); 5173 CU_ASSERT(range->length == 10); 5174 5175 /* Finally, unlock 35-44. 
*/ 5176 g_unlock_lba_range_done = false; 5177 rc = bdev_unlock_lba_range(desc, io_ch, 35, 10, unlock_lba_range_done, &ctx1); 5178 CU_ASSERT(rc == 0); 5179 poll_threads(); 5180 5181 CU_ASSERT(g_unlock_lba_range_done == true); 5182 CU_ASSERT(TAILQ_EMPTY(&bdev->internal.locked_ranges)); 5183 5184 spdk_put_io_channel(io_ch); 5185 spdk_bdev_close(desc); 5186 free_bdev(bdev); 5187 ut_fini_bdev(); 5188 } 5189 5190 static void 5191 bdev_quiesce_done(void *ctx, int status) 5192 { 5193 g_lock_lba_range_done = true; 5194 } 5195 5196 static void 5197 bdev_unquiesce_done(void *ctx, int status) 5198 { 5199 g_unlock_lba_range_done = true; 5200 } 5201 5202 static void 5203 bdev_quiesce_done_unquiesce(void *ctx, int status) 5204 { 5205 struct spdk_bdev *bdev = ctx; 5206 int rc; 5207 5208 g_lock_lba_range_done = true; 5209 5210 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, NULL); 5211 CU_ASSERT(rc == 0); 5212 } 5213 5214 static void 5215 bdev_quiesce(void) 5216 { 5217 struct spdk_bdev *bdev; 5218 struct spdk_bdev_desc *desc = NULL; 5219 struct spdk_io_channel *io_ch; 5220 struct spdk_bdev_channel *channel; 5221 struct lba_range *range; 5222 struct spdk_bdev_io *bdev_io; 5223 int ctx1; 5224 int rc; 5225 5226 ut_init_bdev(NULL); 5227 bdev = allocate_bdev("bdev0"); 5228 5229 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5230 CU_ASSERT(rc == 0); 5231 CU_ASSERT(desc != NULL); 5232 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5233 io_ch = spdk_bdev_get_io_channel(desc); 5234 CU_ASSERT(io_ch != NULL); 5235 channel = spdk_io_channel_get_ctx(io_ch); 5236 5237 g_lock_lba_range_done = false; 5238 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5239 CU_ASSERT(rc == 0); 5240 poll_threads(); 5241 5242 CU_ASSERT(g_lock_lba_range_done == true); 5243 range = TAILQ_FIRST(&channel->locked_ranges); 5244 SPDK_CU_ASSERT_FATAL(range != NULL); 5245 CU_ASSERT(range->offset == 0); 5246 CU_ASSERT(range->length == bdev->blockcnt); 5247 CU_ASSERT(range->owner_ch == NULL); 5248 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5249 SPDK_CU_ASSERT_FATAL(range != NULL); 5250 CU_ASSERT(range->offset == 0); 5251 CU_ASSERT(range->length == bdev->blockcnt); 5252 CU_ASSERT(range->owner_ch == NULL); 5253 5254 g_unlock_lba_range_done = false; 5255 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5256 CU_ASSERT(rc == 0); 5257 spdk_delay_us(100); 5258 poll_threads(); 5259 5260 CU_ASSERT(g_unlock_lba_range_done == true); 5261 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5262 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5263 5264 g_lock_lba_range_done = false; 5265 rc = spdk_bdev_quiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_quiesce_done, &ctx1); 5266 CU_ASSERT(rc == 0); 5267 poll_threads(); 5268 5269 CU_ASSERT(g_lock_lba_range_done == true); 5270 range = TAILQ_FIRST(&channel->locked_ranges); 5271 SPDK_CU_ASSERT_FATAL(range != NULL); 5272 CU_ASSERT(range->offset == 20); 5273 CU_ASSERT(range->length == 10); 5274 CU_ASSERT(range->owner_ch == NULL); 5275 range = TAILQ_FIRST(&bdev_ut_if.internal.quiesced_ranges); 5276 SPDK_CU_ASSERT_FATAL(range != NULL); 5277 CU_ASSERT(range->offset == 20); 5278 CU_ASSERT(range->length == 10); 5279 CU_ASSERT(range->owner_ch == NULL); 5280 5281 /* Unlocks must exactly match a lock. 
*/ 5282 g_unlock_lba_range_done = false; 5283 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 1, bdev_unquiesce_done, &ctx1); 5284 CU_ASSERT(rc == -EINVAL); 5285 CU_ASSERT(g_unlock_lba_range_done == false); 5286 5287 rc = spdk_bdev_unquiesce_range(bdev, &bdev_ut_if, 20, 10, bdev_unquiesce_done, &ctx1); 5288 CU_ASSERT(rc == 0); 5289 spdk_delay_us(100); 5290 poll_threads(); 5291 5292 CU_ASSERT(g_unlock_lba_range_done == true); 5293 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5294 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5295 5296 /* Test unquiesce from quiesce cb */ 5297 g_lock_lba_range_done = false; 5298 g_unlock_lba_range_done = false; 5299 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done_unquiesce, bdev); 5300 CU_ASSERT(rc == 0); 5301 poll_threads(); 5302 5303 CU_ASSERT(g_lock_lba_range_done == true); 5304 CU_ASSERT(g_unlock_lba_range_done == true); 5305 5306 /* Test quiesce with read I/O */ 5307 g_lock_lba_range_done = false; 5308 g_unlock_lba_range_done = false; 5309 g_io_done = false; 5310 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5311 CU_ASSERT(rc == 0); 5312 5313 rc = spdk_bdev_quiesce(bdev, &bdev_ut_if, bdev_quiesce_done, &ctx1); 5314 CU_ASSERT(rc == 0); 5315 poll_threads(); 5316 5317 CU_ASSERT(g_io_done == false); 5318 CU_ASSERT(g_lock_lba_range_done == false); 5319 range = TAILQ_FIRST(&channel->locked_ranges); 5320 SPDK_CU_ASSERT_FATAL(range != NULL); 5321 5322 stub_complete_io(1); 5323 spdk_delay_us(100); 5324 poll_threads(); 5325 CU_ASSERT(g_io_done == true); 5326 CU_ASSERT(g_lock_lba_range_done == true); 5327 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5328 5329 g_io_done = false; 5330 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 20, 1, io_done, &ctx1); 5331 CU_ASSERT(rc == 0); 5332 5333 bdev_io = TAILQ_FIRST(&channel->io_locked); 5334 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 5335 CU_ASSERT(bdev_io->u.bdev.offset_blocks == 20); 5336 CU_ASSERT(bdev_io->u.bdev.num_blocks == 1); 5337 5338 rc = spdk_bdev_unquiesce(bdev, &bdev_ut_if, bdev_unquiesce_done, &ctx1); 5339 CU_ASSERT(rc == 0); 5340 spdk_delay_us(100); 5341 poll_threads(); 5342 5343 CU_ASSERT(g_unlock_lba_range_done == true); 5344 CU_ASSERT(TAILQ_EMPTY(&channel->locked_ranges)); 5345 CU_ASSERT(TAILQ_EMPTY(&bdev_ut_if.internal.quiesced_ranges)); 5346 5347 CU_ASSERT(TAILQ_EMPTY(&channel->io_locked)); 5348 spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS); 5349 poll_threads(); 5350 CU_ASSERT(g_io_done == true); 5351 5352 spdk_put_io_channel(io_ch); 5353 spdk_bdev_close(desc); 5354 free_bdev(bdev); 5355 ut_fini_bdev(); 5356 } 5357 5358 static void 5359 abort_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg) 5360 { 5361 g_abort_done = true; 5362 g_abort_status = bdev_io->internal.status; 5363 spdk_bdev_free_io(bdev_io); 5364 } 5365 5366 static void 5367 bdev_io_abort(void) 5368 { 5369 struct spdk_bdev *bdev; 5370 struct spdk_bdev_desc *desc = NULL; 5371 struct spdk_io_channel *io_ch; 5372 struct spdk_bdev_channel *channel; 5373 struct spdk_bdev_mgmt_channel *mgmt_ch; 5374 struct spdk_bdev_opts bdev_opts = {}; 5375 struct iovec iov[SPDK_BDEV_IO_NUM_CHILD_IOV * 2]; 5376 uint64_t io_ctx1 = 0, io_ctx2 = 0, i; 5377 int rc; 5378 5379 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5380 bdev_opts.bdev_io_pool_size = 7; 5381 bdev_opts.bdev_io_cache_size = 2; 5382 ut_init_bdev(&bdev_opts); 5383 5384 bdev = allocate_bdev("bdev0"); 5385 5386 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5387 
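/*
 * A note on the abort API exercised below (illustrative sketch, not part of
 * the test): spdk_bdev_abort() identifies the I/O to abort by the cb_arg
 * pointer that was passed when the I/O was submitted, so a caller that may
 * need to abort has to keep that pointer around, roughly:
 *
 *     rc = spdk_bdev_read_blocks(desc, io_ch, buf, 0, 1, read_done, &io_ctx);
 *     ...
 *     rc = spdk_bdev_abort(desc, io_ch, &io_ctx, abort_done, NULL);
 *
 * That is why every submission below is tagged with &io_ctx1 or &io_ctx2.
 */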
CU_ASSERT(rc == 0); 5388 CU_ASSERT(desc != NULL); 5389 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5390 io_ch = spdk_bdev_get_io_channel(desc); 5391 CU_ASSERT(io_ch != NULL); 5392 channel = spdk_io_channel_get_ctx(io_ch); 5393 mgmt_ch = channel->shared_resource->mgmt_ch; 5394 5395 g_abort_done = false; 5396 5397 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, false); 5398 5399 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5400 CU_ASSERT(rc == -ENOTSUP); 5401 5402 ut_enable_io_type(SPDK_BDEV_IO_TYPE_ABORT, true); 5403 5404 rc = spdk_bdev_abort(desc, io_ch, &io_ctx2, abort_done, NULL); 5405 CU_ASSERT(rc == 0); 5406 CU_ASSERT(g_abort_done == true); 5407 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_FAILED); 5408 5409 /* Test the case where the target I/O is successfully aborted. */ 5410 g_io_done = false; 5411 5412 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5413 CU_ASSERT(rc == 0); 5414 CU_ASSERT(g_io_done == false); 5415 5416 g_abort_done = false; 5417 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5418 5419 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5420 CU_ASSERT(rc == 0); 5421 CU_ASSERT(g_io_done == true); 5422 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5423 stub_complete_io(1); 5424 CU_ASSERT(g_abort_done == true); 5425 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5426 5427 /* Test the case where the target I/O is not aborted because it completes 5428 * in the middle of the abort's execution. 5429 */ 5430 g_io_done = false; 5431 5432 rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, &io_ctx1); 5433 CU_ASSERT(rc == 0); 5434 CU_ASSERT(g_io_done == false); 5435 5436 g_abort_done = false; 5437 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5438 5439 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5440 CU_ASSERT(rc == 0); 5441 CU_ASSERT(g_io_done == false); 5442 5443 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5444 stub_complete_io(1); 5445 CU_ASSERT(g_io_done == true); 5446 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5447 5448 g_io_exp_status = SPDK_BDEV_IO_STATUS_FAILED; 5449 stub_complete_io(1); 5450 CU_ASSERT(g_abort_done == true); 5451 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5452 5453 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5454 5455 bdev->optimal_io_boundary = 16; 5456 bdev->split_on_optimal_io_boundary = true; 5457 5458 /* Test that a single-vector command which is split is aborted correctly. 5459 * Offset 14, length 8, payload 0xF000 5460 * Child - Offset 14, length 2, payload 0xF000 5461 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 5462 */ 5463 g_io_done = false; 5464 5465 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, &io_ctx1); 5466 CU_ASSERT(rc == 0); 5467 CU_ASSERT(g_io_done == false); 5468 5469 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5470 5471 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5472 5473 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5474 CU_ASSERT(rc == 0); 5475 CU_ASSERT(g_io_done == true); 5476 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5477 stub_complete_io(2); 5478 CU_ASSERT(g_abort_done == true); 5479 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5480 5481 /* Test that a multi-vector command that needs to be split by strip, and then 5482 * split again, is aborted correctly. The abort is requested before the second 5483 * child I/O is submitted.
The parent I/O should complete with failure without 5484 * submitting the second child I/O. 5485 */ 5486 for (i = 0; i < SPDK_BDEV_IO_NUM_CHILD_IOV * 2; i++) { 5487 iov[i].iov_base = (void *)((i + 1) * 0x10000); 5488 iov[i].iov_len = 512; 5489 } 5490 5491 bdev->optimal_io_boundary = SPDK_BDEV_IO_NUM_CHILD_IOV; 5492 g_io_done = false; 5493 rc = spdk_bdev_readv_blocks(desc, io_ch, iov, SPDK_BDEV_IO_NUM_CHILD_IOV * 2, 0, 5494 SPDK_BDEV_IO_NUM_CHILD_IOV * 2, io_done, &io_ctx1); 5495 CU_ASSERT(rc == 0); 5496 CU_ASSERT(g_io_done == false); 5497 5498 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5499 5500 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5501 5502 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5503 CU_ASSERT(rc == 0); 5504 CU_ASSERT(g_io_done == true); 5505 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5506 stub_complete_io(1); 5507 CU_ASSERT(g_abort_done == true); 5508 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5509 5510 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5511 5512 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5513 5514 bdev->optimal_io_boundary = 16; 5515 g_io_done = false; 5516 5517 /* Test that a single-vector command which is split is aborted correctly. 5518 * Unlike the above, the child abort requests will be submitted sequentially 5519 * due to the limited capacity of the spdk_bdev_io pool. 5520 */ 5521 rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 50, io_done, &io_ctx1); 5522 CU_ASSERT(rc == 0); 5523 CU_ASSERT(g_io_done == false); 5524 5525 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5526 5527 g_abort_done = false; 5528 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5529 5530 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5531 CU_ASSERT(rc == 0); 5532 CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue)); 5533 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4); 5534 5535 stub_complete_io(1); 5536 CU_ASSERT(g_io_done == true); 5537 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5538 stub_complete_io(3); 5539 CU_ASSERT(g_abort_done == true); 5540 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5541 5542 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5543 5544 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5545 5546 bdev->split_on_optimal_io_boundary = false; 5547 bdev->split_on_write_unit = true; 5548 bdev->write_unit_size = 16; 5549 5550 /* Test that a single-vector command which is split is aborted correctly. 5551 * Offset 16, length 32, payload 0xF000 5552 * Child - Offset 16, length 16, payload 0xF000 5553 * Child - Offset 32, length 16, payload 0xF000 + 16 * 512 5554 * 5555 * Use bdev->split_on_write_unit as a split condition.
5556 */ 5557 g_io_done = false; 5558 5559 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 16, 32, io_done, &io_ctx1); 5560 CU_ASSERT(rc == 0); 5561 CU_ASSERT(g_io_done == false); 5562 5563 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5564 5565 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5566 5567 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5568 CU_ASSERT(rc == 0); 5569 CU_ASSERT(g_io_done == true); 5570 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5571 stub_complete_io(2); 5572 CU_ASSERT(g_abort_done == true); 5573 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5574 5575 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5576 5577 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5578 5579 bdev->split_on_write_unit = false; 5580 bdev->max_rw_size = 16; 5581 5582 /* Test that a single-vector command which is split is aborted correctly. 5583 * Use bdev->max_rw_size as a split condition. 5584 */ 5585 g_io_done = false; 5586 5587 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1); 5588 CU_ASSERT(rc == 0); 5589 CU_ASSERT(g_io_done == false); 5590 5591 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5592 5593 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5594 5595 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5596 CU_ASSERT(rc == 0); 5597 CU_ASSERT(g_io_done == true); 5598 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5599 stub_complete_io(2); 5600 CU_ASSERT(g_abort_done == true); 5601 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5602 5603 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5604 5605 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5606 5607 bdev->max_rw_size = 0; 5608 bdev->max_segment_size = 512 * 16; 5609 bdev->max_num_segments = 1; 5610 5611 /* Test that a single-vector command which is split is aborted correctly. 5612 * Use bdev->max_segment_size and bdev->max_num_segments together as split conditions. 5613 * 5614 * The single-vector command would become a two-vector command, but 5615 * bdev->max_num_segments is 1, so it is split into two single-vector commands.
5616 */ 5617 g_io_done = false; 5618 5619 rc = spdk_bdev_write_blocks(desc, io_ch, (void *)0xF000, 0, 32, io_done, &io_ctx1); 5620 CU_ASSERT(rc == 0); 5621 CU_ASSERT(g_io_done == false); 5622 5623 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5624 5625 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5626 5627 rc = spdk_bdev_abort(desc, io_ch, &io_ctx1, abort_done, NULL); 5628 CU_ASSERT(rc == 0); 5629 CU_ASSERT(g_io_done == true); 5630 CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED); 5631 stub_complete_io(2); 5632 CU_ASSERT(g_abort_done == true); 5633 CU_ASSERT(g_abort_status == SPDK_BDEV_IO_STATUS_SUCCESS); 5634 5635 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5636 5637 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5638 5639 spdk_put_io_channel(io_ch); 5640 spdk_bdev_close(desc); 5641 free_bdev(bdev); 5642 ut_fini_bdev(); 5643 } 5644 5645 static void 5646 bdev_unmap(void) 5647 { 5648 struct spdk_bdev *bdev; 5649 struct spdk_bdev_desc *desc = NULL; 5650 struct spdk_io_channel *ioch; 5651 struct spdk_bdev_channel *bdev_ch; 5652 struct ut_expected_io *expected_io; 5653 struct spdk_bdev_opts bdev_opts = {}; 5654 uint32_t i, num_outstanding; 5655 uint64_t offset, num_blocks, max_unmap_blocks, num_children; 5656 int rc; 5657 5658 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5659 bdev_opts.bdev_io_pool_size = 512; 5660 bdev_opts.bdev_io_cache_size = 64; 5661 ut_init_bdev(&bdev_opts); 5662 5663 bdev = allocate_bdev("bdev"); 5664 5665 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5666 CU_ASSERT_EQUAL(rc, 0); 5667 SPDK_CU_ASSERT_FATAL(desc != NULL); 5668 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5669 ioch = spdk_bdev_get_io_channel(desc); 5670 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5671 bdev_ch = spdk_io_channel_get_ctx(ioch); 5672 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5673 5674 fn_table.submit_request = stub_submit_request; 5675 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5676 5677 /* Case 1: First test the request won't be split */ 5678 num_blocks = 32; 5679 5680 g_io_done = false; 5681 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 0, num_blocks, 0); 5682 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5683 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5684 CU_ASSERT_EQUAL(rc, 0); 5685 CU_ASSERT(g_io_done == false); 5686 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5687 stub_complete_io(1); 5688 CU_ASSERT(g_io_done == true); 5689 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5690 5691 /* Case 2: Test the split with 2 children requests */ 5692 bdev->max_unmap = 8; 5693 bdev->max_unmap_segments = 2; 5694 max_unmap_blocks = bdev->max_unmap * bdev->max_unmap_segments; 5695 num_blocks = max_unmap_blocks * 2; 5696 offset = 0; 5697 5698 g_io_done = false; 5699 for (i = 0; i < 2; i++) { 5700 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5701 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5702 offset += max_unmap_blocks; 5703 } 5704 5705 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5706 CU_ASSERT_EQUAL(rc, 0); 5707 CU_ASSERT(g_io_done == false); 5708 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5709 stub_complete_io(2); 5710 CU_ASSERT(g_io_done == true); 5711 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5712 5713 /* Case 3: Test the split with 15 children requests, will finish 8 requests first */ 5714 num_children = 15; 5715 
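/*
 * Case 3 below relies on the bdev layer capping the number of concurrently
 * outstanding children of a split unmap at
 * SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS. Assuming the cap is 8 (its
 * value at the time of writing), the 15 children are expected to complete as
 * a batch of 8 followed by a batch of 7, which is what the loop below checks.
 */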
num_blocks = max_unmap_blocks * num_children; 5716 g_io_done = false; 5717 offset = 0; 5718 for (i = 0; i < num_children; i++) { 5719 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, offset, max_unmap_blocks, 0); 5720 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5721 offset += max_unmap_blocks; 5722 } 5723 5724 rc = spdk_bdev_unmap_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5725 CU_ASSERT_EQUAL(rc, 0); 5726 CU_ASSERT(g_io_done == false); 5727 5728 while (num_children > 0) { 5729 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5730 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5731 stub_complete_io(num_outstanding); 5732 num_children -= num_outstanding; 5733 } 5734 CU_ASSERT(g_io_done == true); 5735 5736 spdk_put_io_channel(ioch); 5737 spdk_bdev_close(desc); 5738 free_bdev(bdev); 5739 ut_fini_bdev(); 5740 } 5741 5742 static void 5743 bdev_write_zeroes_split_test(void) 5744 { 5745 struct spdk_bdev *bdev; 5746 struct spdk_bdev_desc *desc = NULL; 5747 struct spdk_io_channel *ioch; 5748 struct spdk_bdev_channel *bdev_ch; 5749 struct ut_expected_io *expected_io; 5750 struct spdk_bdev_opts bdev_opts = {}; 5751 uint32_t i, num_outstanding; 5752 uint64_t offset, num_blocks, max_write_zeroes_blocks, num_children; 5753 int rc; 5754 5755 spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts)); 5756 bdev_opts.bdev_io_pool_size = 512; 5757 bdev_opts.bdev_io_cache_size = 64; 5758 ut_init_bdev(&bdev_opts); 5759 5760 bdev = allocate_bdev("bdev"); 5761 5762 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 5763 CU_ASSERT_EQUAL(rc, 0); 5764 SPDK_CU_ASSERT_FATAL(desc != NULL); 5765 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5766 ioch = spdk_bdev_get_io_channel(desc); 5767 SPDK_CU_ASSERT_FATAL(ioch != NULL); 5768 bdev_ch = spdk_io_channel_get_ctx(ioch); 5769 CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted)); 5770 5771 fn_table.submit_request = stub_submit_request; 5772 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 5773 5774 /* Case 1: First test the request won't be split */ 5775 num_blocks = 32; 5776 5777 g_io_done = false; 5778 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 0, num_blocks, 0); 5779 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5780 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5781 CU_ASSERT_EQUAL(rc, 0); 5782 CU_ASSERT(g_io_done == false); 5783 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5784 stub_complete_io(1); 5785 CU_ASSERT(g_io_done == true); 5786 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5787 5788 /* Case 2: Test the split with 2 children requests */ 5789 max_write_zeroes_blocks = 8; 5790 bdev->max_write_zeroes = max_write_zeroes_blocks; 5791 num_blocks = max_write_zeroes_blocks * 2; 5792 offset = 0; 5793 5794 g_io_done = false; 5795 for (i = 0; i < 2; i++) { 5796 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5797 0); 5798 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5799 offset += max_write_zeroes_blocks; 5800 } 5801 5802 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5803 CU_ASSERT_EQUAL(rc, 0); 5804 CU_ASSERT(g_io_done == false); 5805 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 5806 stub_complete_io(2); 5807 CU_ASSERT(g_io_done == true); 5808 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 5809 5810 /* Case 3: Test the 
split with 15 children requests, will finish 8 requests first */ 5811 num_children = 15; 5812 num_blocks = max_write_zeroes_blocks * num_children; 5813 g_io_done = false; 5814 offset = 0; 5815 for (i = 0; i < num_children; i++) { 5816 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, offset, max_write_zeroes_blocks, 5817 0); 5818 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5819 offset += max_write_zeroes_blocks; 5820 } 5821 5822 rc = spdk_bdev_write_zeroes_blocks(desc, ioch, 0, num_blocks, io_done, NULL); 5823 CU_ASSERT_EQUAL(rc, 0); 5824 CU_ASSERT(g_io_done == false); 5825 5826 while (num_children > 0) { 5827 num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_UNMAP_WRITE_ZEROES_REQS); 5828 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding); 5829 stub_complete_io(num_outstanding); 5830 num_children -= num_outstanding; 5831 } 5832 CU_ASSERT(g_io_done == true); 5833 5834 spdk_put_io_channel(ioch); 5835 spdk_bdev_close(desc); 5836 free_bdev(bdev); 5837 ut_fini_bdev(); 5838 } 5839 5840 static void 5841 bdev_set_options_test(void) 5842 { 5843 struct spdk_bdev_opts bdev_opts = {}; 5844 int rc; 5845 5846 /* Case1: Do not set opts_size */ 5847 rc = spdk_bdev_set_opts(&bdev_opts); 5848 CU_ASSERT(rc == -1); 5849 } 5850 5851 static struct spdk_memory_domain *g_bdev_memory_domain = (struct spdk_memory_domain *) 0xf00df00d; 5852 5853 static int 5854 test_bdev_get_supported_dma_device_types_op(void *ctx, struct spdk_memory_domain **domains, 5855 int array_size) 5856 { 5857 if (array_size > 0 && domains) { 5858 domains[0] = g_bdev_memory_domain; 5859 } 5860 5861 return 1; 5862 } 5863 5864 static void 5865 bdev_get_memory_domains(void) 5866 { 5867 struct spdk_bdev_fn_table fn_table = { 5868 .get_memory_domains = test_bdev_get_supported_dma_device_types_op 5869 }; 5870 struct spdk_bdev bdev = { .fn_table = &fn_table }; 5871 struct spdk_memory_domain *domains[2] = {}; 5872 int rc; 5873 5874 /* bdev is NULL */ 5875 rc = spdk_bdev_get_memory_domains(NULL, domains, 2); 5876 CU_ASSERT(rc == -EINVAL); 5877 5878 /* domains is NULL */ 5879 rc = spdk_bdev_get_memory_domains(&bdev, NULL, 2); 5880 CU_ASSERT(rc == 1); 5881 5882 /* array size is 0 */ 5883 rc = spdk_bdev_get_memory_domains(&bdev, domains, 0); 5884 CU_ASSERT(rc == 1); 5885 5886 /* get_supported_dma_device_types op is set */ 5887 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5888 CU_ASSERT(rc == 1); 5889 CU_ASSERT(domains[0] == g_bdev_memory_domain); 5890 5891 /* get_supported_dma_device_types op is not set */ 5892 fn_table.get_memory_domains = NULL; 5893 rc = spdk_bdev_get_memory_domains(&bdev, domains, 2); 5894 CU_ASSERT(rc == 0); 5895 } 5896 5897 static void 5898 _bdev_io_ext(struct spdk_bdev_ext_io_opts *ext_io_opts) 5899 { 5900 struct spdk_bdev *bdev; 5901 struct spdk_bdev_desc *desc = NULL; 5902 struct spdk_io_channel *io_ch; 5903 char io_buf[512]; 5904 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5905 struct ut_expected_io *expected_io; 5906 int rc; 5907 5908 ut_init_bdev(NULL); 5909 5910 bdev = allocate_bdev("bdev0"); 5911 bdev->md_interleave = false; 5912 bdev->md_len = 8; 5913 5914 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 5915 CU_ASSERT(rc == 0); 5916 SPDK_CU_ASSERT_FATAL(desc != NULL); 5917 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 5918 io_ch = spdk_bdev_get_io_channel(desc); 5919 CU_ASSERT(io_ch != NULL); 5920 5921 /* read */ 5922 g_io_done = false; 5923 expected_io = 
ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 5924 if (ext_io_opts) { 5925 expected_io->md_buf = ext_io_opts->metadata; 5926 } 5927 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5928 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5929 5930 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5931 5932 CU_ASSERT(rc == 0); 5933 CU_ASSERT(g_io_done == false); 5934 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5935 stub_complete_io(1); 5936 CU_ASSERT(g_io_done == true); 5937 5938 /* write */ 5939 g_io_done = false; 5940 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 5941 if (ext_io_opts) { 5942 expected_io->md_buf = ext_io_opts->metadata; 5943 } 5944 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 5945 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 5946 5947 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, ext_io_opts); 5948 5949 CU_ASSERT(rc == 0); 5950 CU_ASSERT(g_io_done == false); 5951 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 5952 stub_complete_io(1); 5953 CU_ASSERT(g_io_done == true); 5954 5955 spdk_put_io_channel(io_ch); 5956 spdk_bdev_close(desc); 5957 free_bdev(bdev); 5958 ut_fini_bdev(); 5959 5960 } 5961 5962 static void 5963 bdev_io_ext(void) 5964 { 5965 struct spdk_bdev_ext_io_opts ext_io_opts = { 5966 .metadata = (void *)0xFF000000, 5967 .size = sizeof(ext_io_opts), 5968 .dif_check_flags_exclude_mask = 0 5969 }; 5970 5971 _bdev_io_ext(&ext_io_opts); 5972 } 5973 5974 static void 5975 bdev_io_ext_no_opts(void) 5976 { 5977 _bdev_io_ext(NULL); 5978 } 5979 5980 static void 5981 bdev_io_ext_invalid_opts(void) 5982 { 5983 struct spdk_bdev *bdev; 5984 struct spdk_bdev_desc *desc = NULL; 5985 struct spdk_io_channel *io_ch; 5986 char io_buf[512]; 5987 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 5988 struct spdk_bdev_ext_io_opts ext_io_opts = { 5989 .metadata = (void *)0xFF000000, 5990 .size = sizeof(ext_io_opts), 5991 .dif_check_flags_exclude_mask = 0 5992 }; 5993 int rc; 5994 5995 ut_init_bdev(NULL); 5996 5997 bdev = allocate_bdev("bdev0"); 5998 bdev->md_interleave = false; 5999 bdev->md_len = 8; 6000 6001 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6002 CU_ASSERT(rc == 0); 6003 SPDK_CU_ASSERT_FATAL(desc != NULL); 6004 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6005 io_ch = spdk_bdev_get_io_channel(desc); 6006 CU_ASSERT(io_ch != NULL); 6007 6008 /* Test invalid ext_opts size */ 6009 ext_io_opts.size = 0; 6010 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6011 CU_ASSERT(rc == -EINVAL); 6012 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6013 CU_ASSERT(rc == -EINVAL); 6014 6015 ext_io_opts.size = sizeof(ext_io_opts) * 2; 6016 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6017 CU_ASSERT(rc == -EINVAL); 6018 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6019 CU_ASSERT(rc == -EINVAL); 6020 6021 ext_io_opts.size = offsetof(struct spdk_bdev_ext_io_opts, metadata) + 6022 sizeof(ext_io_opts.metadata) - 1; 6023 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6024 CU_ASSERT(rc == -EINVAL); 6025 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6026 CU_ASSERT(rc == 
-EINVAL); 6027 6028 spdk_put_io_channel(io_ch); 6029 spdk_bdev_close(desc); 6030 free_bdev(bdev); 6031 ut_fini_bdev(); 6032 } 6033 6034 static void 6035 bdev_io_ext_split(void) 6036 { 6037 struct spdk_bdev *bdev; 6038 struct spdk_bdev_desc *desc = NULL; 6039 struct spdk_io_channel *io_ch; 6040 char io_buf[512]; 6041 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 6042 struct ut_expected_io *expected_io; 6043 struct spdk_bdev_ext_io_opts ext_io_opts = { 6044 .metadata = (void *)0xFF000000, 6045 .size = sizeof(ext_io_opts), 6046 .dif_check_flags_exclude_mask = 0 6047 }; 6048 int rc; 6049 6050 ut_init_bdev(NULL); 6051 6052 bdev = allocate_bdev("bdev0"); 6053 bdev->md_interleave = false; 6054 bdev->md_len = 8; 6055 6056 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6057 CU_ASSERT(rc == 0); 6058 SPDK_CU_ASSERT_FATAL(desc != NULL); 6059 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6060 io_ch = spdk_bdev_get_io_channel(desc); 6061 CU_ASSERT(io_ch != NULL); 6062 6063 /* Check that IO request with ext_opts and metadata is split correctly 6064 * Offset 14, length 8, payload 0xF000 6065 * Child - Offset 14, length 2, payload 0xF000 6066 * Child - Offset 16, length 6, payload 0xF000 + 2 * 512 6067 */ 6068 bdev->optimal_io_boundary = 16; 6069 bdev->split_on_optimal_io_boundary = true; 6070 bdev->md_interleave = false; 6071 bdev->md_len = 8; 6072 6073 iov.iov_base = (void *)0xF000; 6074 iov.iov_len = 4096; 6075 memset(&ext_io_opts, 0, sizeof(ext_io_opts)); 6076 ext_io_opts.metadata = (void *)0xFF000000; 6077 ext_io_opts.size = sizeof(ext_io_opts); 6078 g_io_done = false; 6079 6080 /* read */ 6081 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1); 6082 expected_io->md_buf = ext_io_opts.metadata; 6083 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 6084 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6085 6086 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1); 6087 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 6088 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 6089 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6090 6091 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 6092 CU_ASSERT(rc == 0); 6093 CU_ASSERT(g_io_done == false); 6094 6095 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6096 stub_complete_io(2); 6097 CU_ASSERT(g_io_done == true); 6098 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6099 6100 /* write */ 6101 g_io_done = false; 6102 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 1); 6103 expected_io->md_buf = ext_io_opts.metadata; 6104 ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512); 6105 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6106 6107 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 6, 1); 6108 expected_io->md_buf = ext_io_opts.metadata + 2 * 8; 6109 ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512); 6110 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6111 6112 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 14, 8, io_done, NULL, &ext_io_opts); 6113 CU_ASSERT(rc == 0); 6114 CU_ASSERT(g_io_done == false); 6115 6116 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6117 stub_complete_io(2); 6118 CU_ASSERT(g_io_done == true); 6119 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6120 6121 
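/*
 * Sanity note on the metadata offsets asserted above: with separate
 * (non-interleaved) metadata and md_len == 8, a child I/O starting two blocks
 * into the parent should see its md_buf advanced by 2 blocks * 8 bytes == 16
 * bytes, which is where the "ext_io_opts.metadata + 2 * 8" values come from.
 */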
spdk_put_io_channel(io_ch); 6122 spdk_bdev_close(desc); 6123 free_bdev(bdev); 6124 ut_fini_bdev(); 6125 } 6126 6127 static void 6128 bdev_io_ext_bounce_buffer(void) 6129 { 6130 struct spdk_bdev *bdev; 6131 struct spdk_bdev_desc *desc = NULL; 6132 struct spdk_io_channel *io_ch; 6133 char io_buf[512]; 6134 struct iovec iov = { .iov_base = io_buf, .iov_len = 512 }; 6135 struct ut_expected_io *expected_io, *aux_io; 6136 struct spdk_bdev_ext_io_opts ext_io_opts = { 6137 .metadata = (void *)0xFF000000, 6138 .size = sizeof(ext_io_opts), 6139 .dif_check_flags_exclude_mask = 0 6140 }; 6141 int rc; 6142 6143 ut_init_bdev(NULL); 6144 6145 bdev = allocate_bdev("bdev0"); 6146 bdev->md_interleave = false; 6147 bdev->md_len = 8; 6148 6149 rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6150 CU_ASSERT(rc == 0); 6151 SPDK_CU_ASSERT_FATAL(desc != NULL); 6152 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6153 io_ch = spdk_bdev_get_io_channel(desc); 6154 CU_ASSERT(io_ch != NULL); 6155 6156 /* Verify data pull/push 6157 * bdev doesn't support memory domains, so buffers from bdev memory pool will be used */ 6158 ext_io_opts.memory_domain = (struct spdk_memory_domain *)0xdeadbeef; 6159 6160 /* read */ 6161 g_io_done = false; 6162 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6163 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6164 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6165 6166 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6167 6168 CU_ASSERT(rc == 0); 6169 CU_ASSERT(g_io_done == false); 6170 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6171 stub_complete_io(1); 6172 CU_ASSERT(g_memory_domain_push_data_called == true); 6173 CU_ASSERT(g_io_done == true); 6174 6175 /* write */ 6176 g_io_done = false; 6177 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6178 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6179 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6180 6181 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6182 6183 CU_ASSERT(rc == 0); 6184 CU_ASSERT(g_memory_domain_pull_data_called == true); 6185 CU_ASSERT(g_io_done == false); 6186 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6187 stub_complete_io(1); 6188 CU_ASSERT(g_io_done == true); 6189 6190 /* Verify the request is queued after receiving ENOMEM from pull */ 6191 g_io_done = false; 6192 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6193 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6194 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6195 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6196 CU_ASSERT(rc == 0); 6197 CU_ASSERT(g_io_done == false); 6198 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6199 6200 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6201 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6202 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6203 6204 MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM); 6205 rc = spdk_bdev_writev_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6206 CU_ASSERT(rc == 0); 6207 CU_ASSERT(g_io_done == false); 6208 /* The second IO has been queued */ 6209 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6210 6211 
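/*
 * At this point the ext write is parked: the data pull failed with -ENOMEM,
 * so the bdev layer queued the request for retry. Clearing the mock below
 * and completing the other outstanding I/O allows the retried pull to
 * succeed, after which the queued write is finally submitted to the bdev.
 */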
MOCK_CLEAR(spdk_memory_domain_pull_data); 6212 g_memory_domain_pull_data_called = false; 6213 stub_complete_io(1); 6214 CU_ASSERT(g_io_done == true); 6215 CU_ASSERT(g_memory_domain_pull_data_called == true); 6216 /* The second IO should be submitted now */ 6217 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6218 g_io_done = false; 6219 stub_complete_io(1); 6220 CU_ASSERT(g_io_done == true); 6221 6222 /* Verify the request is queued after receiving ENOMEM from push */ 6223 g_io_done = false; 6224 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 32, 14, 1); 6225 ut_expected_io_set_iov(expected_io, 0, iov.iov_base, iov.iov_len); 6226 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6227 6228 MOCK_SET(spdk_memory_domain_push_data, -ENOMEM); 6229 rc = spdk_bdev_readv_blocks_ext(desc, io_ch, &iov, 1, 32, 14, io_done, NULL, &ext_io_opts); 6230 CU_ASSERT(rc == 0); 6231 CU_ASSERT(g_io_done == false); 6232 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6233 6234 aux_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 1); 6235 ut_expected_io_set_iov(aux_io, 0, iov.iov_base, iov.iov_len); 6236 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, aux_io, link); 6237 rc = spdk_bdev_writev_blocks(desc, io_ch, &iov, 1, 32, 14, io_done, NULL); 6238 CU_ASSERT(rc == 0); 6239 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2); 6240 6241 stub_complete_io(1); 6242 /* The IO isn't done yet, it's still waiting on push */ 6243 CU_ASSERT(g_io_done == false); 6244 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6245 MOCK_CLEAR(spdk_memory_domain_push_data); 6246 g_memory_domain_push_data_called = false; 6247 /* Completing the second IO should also trigger push on the first one */ 6248 stub_complete_io(1); 6249 CU_ASSERT(g_io_done == true); 6250 CU_ASSERT(g_memory_domain_push_data_called == true); 6251 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6252 6253 spdk_put_io_channel(io_ch); 6254 spdk_bdev_close(desc); 6255 free_bdev(bdev); 6256 ut_fini_bdev(); 6257 } 6258 6259 static void 6260 bdev_register_uuid_alias(void) 6261 { 6262 struct spdk_bdev *bdev, *second; 6263 char uuid[SPDK_UUID_STRING_LEN]; 6264 int rc; 6265 6266 ut_init_bdev(NULL); 6267 bdev = allocate_bdev("bdev0"); 6268 6269 /* Make sure an UUID was generated */ 6270 CU_ASSERT_FALSE(spdk_uuid_is_null(&bdev->uuid)); 6271 6272 /* Check that an UUID alias was registered */ 6273 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 6274 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6275 6276 /* Unregister the bdev */ 6277 spdk_bdev_unregister(bdev, NULL, NULL); 6278 poll_threads(); 6279 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6280 6281 /* Check the same, but this time register the bdev with non-zero UUID */ 6282 rc = spdk_bdev_register(bdev); 6283 CU_ASSERT_EQUAL(rc, 0); 6284 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6285 6286 /* Unregister the bdev */ 6287 spdk_bdev_unregister(bdev, NULL, NULL); 6288 poll_threads(); 6289 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6290 6291 /* Register the bdev using UUID as the name */ 6292 bdev->name = uuid; 6293 rc = spdk_bdev_register(bdev); 6294 CU_ASSERT_EQUAL(rc, 0); 6295 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6296 6297 /* Unregister the bdev */ 6298 spdk_bdev_unregister(bdev, NULL, NULL); 6299 poll_threads(); 6300 CU_ASSERT_PTR_NULL(spdk_bdev_get_by_name(uuid)); 6301 6302 /* Check that it's not possible to register two bdevs with the same UUIDs */ 6303 bdev->name = "bdev0"; 6304 second = 
allocate_bdev("bdev1"); 6305 spdk_uuid_copy(&bdev->uuid, &second->uuid); 6306 rc = spdk_bdev_register(bdev); 6307 CU_ASSERT_EQUAL(rc, -EEXIST); 6308 6309 /* Regenerate the UUID and re-check */ 6310 spdk_uuid_generate(&bdev->uuid); 6311 rc = spdk_bdev_register(bdev); 6312 CU_ASSERT_EQUAL(rc, 0); 6313 6314 /* And check that both bdevs can be retrieved through their UUIDs */ 6315 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &bdev->uuid); 6316 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), bdev); 6317 spdk_uuid_fmt_lower(uuid, sizeof(uuid), &second->uuid); 6318 CU_ASSERT_EQUAL(spdk_bdev_get_by_name(uuid), second); 6319 6320 free_bdev(second); 6321 free_bdev(bdev); 6322 ut_fini_bdev(); 6323 } 6324 6325 static void 6326 bdev_unregister_by_name(void) 6327 { 6328 struct spdk_bdev *bdev; 6329 int rc; 6330 6331 bdev = allocate_bdev("bdev"); 6332 6333 g_event_type1 = 0xFF; 6334 g_unregister_arg = NULL; 6335 g_unregister_rc = -1; 6336 6337 rc = spdk_bdev_unregister_by_name("bdev1", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6338 CU_ASSERT(rc == -ENODEV); 6339 6340 rc = spdk_bdev_unregister_by_name("bdev", &vbdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6341 CU_ASSERT(rc == -ENODEV); 6342 6343 rc = spdk_bdev_unregister_by_name("bdev", &bdev_ut_if, bdev_unregister_cb, (void *)0x12345678); 6344 CU_ASSERT(rc == 0); 6345 6346 /* Check that unregister callback is delayed */ 6347 CU_ASSERT(g_unregister_arg == NULL); 6348 CU_ASSERT(g_unregister_rc == -1); 6349 6350 poll_threads(); 6351 6352 /* Event callback shall not be issued because device was closed */ 6353 CU_ASSERT(g_event_type1 == 0xFF); 6354 /* Unregister callback is issued */ 6355 CU_ASSERT(g_unregister_arg == (void *)0x12345678); 6356 CU_ASSERT(g_unregister_rc == 0); 6357 6358 free_bdev(bdev); 6359 } 6360 6361 static int 6362 count_bdevs(void *ctx, struct spdk_bdev *bdev) 6363 { 6364 int *count = ctx; 6365 6366 (*count)++; 6367 6368 return 0; 6369 } 6370 6371 static void 6372 for_each_bdev_test(void) 6373 { 6374 struct spdk_bdev *bdev[8]; 6375 int rc, count; 6376 6377 bdev[0] = allocate_bdev("bdev0"); 6378 bdev[0]->internal.status = SPDK_BDEV_STATUS_REMOVING; 6379 6380 bdev[1] = allocate_bdev("bdev1"); 6381 rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if); 6382 CU_ASSERT(rc == 0); 6383 6384 bdev[2] = allocate_bdev("bdev2"); 6385 6386 bdev[3] = allocate_bdev("bdev3"); 6387 rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if); 6388 CU_ASSERT(rc == 0); 6389 6390 bdev[4] = allocate_bdev("bdev4"); 6391 6392 bdev[5] = allocate_bdev("bdev5"); 6393 rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if); 6394 CU_ASSERT(rc == 0); 6395 6396 bdev[6] = allocate_bdev("bdev6"); 6397 6398 bdev[7] = allocate_bdev("bdev7"); 6399 6400 count = 0; 6401 rc = spdk_for_each_bdev(&count, count_bdevs); 6402 CU_ASSERT(rc == 0); 6403 CU_ASSERT(count == 7); 6404 6405 count = 0; 6406 rc = spdk_for_each_bdev_leaf(&count, count_bdevs); 6407 CU_ASSERT(rc == 0); 6408 CU_ASSERT(count == 4); 6409 6410 bdev[0]->internal.status = SPDK_BDEV_STATUS_READY; 6411 free_bdev(bdev[0]); 6412 free_bdev(bdev[1]); 6413 free_bdev(bdev[2]); 6414 free_bdev(bdev[3]); 6415 free_bdev(bdev[4]); 6416 free_bdev(bdev[5]); 6417 free_bdev(bdev[6]); 6418 free_bdev(bdev[7]); 6419 } 6420 6421 static void 6422 bdev_seek_test(void) 6423 { 6424 struct spdk_bdev *bdev; 6425 struct spdk_bdev_desc *desc = NULL; 6426 struct spdk_io_channel *io_ch; 6427 int rc; 6428 6429 ut_init_bdev(NULL); 6430 poll_threads(); 6431 6432 bdev = allocate_bdev("bdev0"); 6433 6434 rc = 
spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc); 6435 CU_ASSERT(rc == 0); 6436 poll_threads(); 6437 SPDK_CU_ASSERT_FATAL(desc != NULL); 6438 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6439 io_ch = spdk_bdev_get_io_channel(desc); 6440 CU_ASSERT(io_ch != NULL); 6441 6442 /* Seek data not supported */ 6443 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, false); 6444 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6445 CU_ASSERT(rc == 0); 6446 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6447 poll_threads(); 6448 CU_ASSERT(g_seek_offset == 0); 6449 6450 /* Seek hole not supported */ 6451 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, false); 6452 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6453 CU_ASSERT(rc == 0); 6454 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6455 poll_threads(); 6456 CU_ASSERT(g_seek_offset == UINT64_MAX); 6457 6458 /* Seek data supported */ 6459 g_seek_data_offset = 12345; 6460 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_DATA, true); 6461 rc = spdk_bdev_seek_data(desc, io_ch, 0, bdev_seek_cb, NULL); 6462 CU_ASSERT(rc == 0); 6463 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6464 stub_complete_io(1); 6465 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6466 CU_ASSERT(g_seek_offset == 12345); 6467 6468 /* Seek hole supported */ 6469 g_seek_hole_offset = 67890; 6470 ut_enable_io_type(SPDK_BDEV_IO_TYPE_SEEK_HOLE, true); 6471 rc = spdk_bdev_seek_hole(desc, io_ch, 0, bdev_seek_cb, NULL); 6472 CU_ASSERT(rc == 0); 6473 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1); 6474 stub_complete_io(1); 6475 CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0); 6476 CU_ASSERT(g_seek_offset == 67890); 6477 6478 spdk_put_io_channel(io_ch); 6479 spdk_bdev_close(desc); 6480 free_bdev(bdev); 6481 ut_fini_bdev(); 6482 } 6483 6484 static void 6485 bdev_copy(void) 6486 { 6487 struct spdk_bdev *bdev; 6488 struct spdk_bdev_desc *desc = NULL; 6489 struct spdk_io_channel *ioch; 6490 struct ut_expected_io *expected_io; 6491 uint64_t src_offset, num_blocks; 6492 uint32_t num_completed; 6493 int rc; 6494 6495 ut_init_bdev(NULL); 6496 bdev = allocate_bdev("bdev"); 6497 6498 rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc); 6499 CU_ASSERT_EQUAL(rc, 0); 6500 SPDK_CU_ASSERT_FATAL(desc != NULL); 6501 CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc)); 6502 ioch = spdk_bdev_get_io_channel(desc); 6503 SPDK_CU_ASSERT_FATAL(ioch != NULL); 6504 6505 fn_table.submit_request = stub_submit_request; 6506 g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS; 6507 6508 /* First test that if the bdev supports copy, the request won't be split */ 6509 bdev->md_len = 0; 6510 bdev->blocklen = 512; 6511 num_blocks = 128; 6512 src_offset = bdev->blockcnt - num_blocks; 6513 6514 expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks); 6515 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6516 6517 rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL); 6518 CU_ASSERT_EQUAL(rc, 0); 6519 num_completed = stub_complete_io(1); 6520 CU_ASSERT_EQUAL(num_completed, 1); 6521 6522 /* Check that if copy is not supported it'll still work */ 6523 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0); 6524 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link); 6525 expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0); 6526 TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, 
static void
bdev_copy(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct ut_expected_io *expected_io;
	uint64_t src_offset, num_blocks;
	uint32_t num_completed;
	int rc;

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* First test that if the bdev supports copy, the request won't be split */
	bdev->md_len = 0;
	bdev->blocklen = 512;
	num_blocks = 128;
	src_offset = bdev->blockcnt - num_blocks;

	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	/* Check that if copy is not supported it'll still work */
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 0, num_blocks, 0);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);
	num_completed = stub_complete_io(1);
	CU_ASSERT_EQUAL(num_completed, 1);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);
	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

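/*
 * Copy requests larger than the bdev's max_copy limit are split into child
 * requests of at most max_copy blocks each, and the bdev layer keeps at most
 * SPDK_BDEV_MAX_CHILDREN_COPY_REQS children outstanding at a time.
 */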
static void
bdev_copy_split_test(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel *ioch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_expected_io *expected_io;
	struct spdk_bdev_opts bdev_opts = {};
	uint32_t i, num_outstanding;
	uint64_t offset, src_offset, num_blocks, max_copy_blocks, num_children;
	int rc;

	spdk_bdev_get_opts(&bdev_opts, sizeof(bdev_opts));
	bdev_opts.bdev_io_pool_size = 512;
	bdev_opts.bdev_io_cache_size = 64;
	rc = spdk_bdev_set_opts(&bdev_opts);
	CU_ASSERT(rc == 0);

	ut_init_bdev(NULL);
	bdev = allocate_bdev("bdev");

	rc = spdk_bdev_open_ext("bdev", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT_EQUAL(rc, 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(bdev == spdk_bdev_desc_get_bdev(desc));
	ioch = spdk_bdev_get_io_channel(desc);
	SPDK_CU_ASSERT_FATAL(ioch != NULL);
	bdev_ch = spdk_io_channel_get_ctx(ioch);
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->io_submitted));

	fn_table.submit_request = stub_submit_request;
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	/* Case 1: First test the request won't be split */
	num_blocks = 32;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, 0, src_offset, num_blocks);
	TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
	stub_complete_io(1);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 2: Test the split with 2 child requests */
	max_copy_blocks = 8;
	bdev->max_copy = max_copy_blocks;
	num_children = 2;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_children);
	stub_complete_io(num_children);
	CU_ASSERT(g_io_done == true);
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);

	/* Case 3: Test the split with 15 child requests, which will finish 8 requests first */
	num_children = 15;
	num_blocks = max_copy_blocks * num_children;
	offset = 0;
	src_offset = bdev->blockcnt - num_blocks;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_copy_io(SPDK_BDEV_IO_TYPE_COPY, offset,
							src_offset + offset, max_copy_blocks);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	rc = spdk_bdev_copy_blocks(desc, ioch, 0, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	/* Case 4: Same test scenario as case 2, but copy is not supported. */
	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, false);

	num_children = 2;
	max_copy_blocks = spdk_bdev_get_max_copy(bdev);
	num_blocks = max_copy_blocks * num_children;
	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	g_io_done = false;
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, src_offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		src_offset += max_copy_blocks;
	}
	for (i = 0; i < num_children; i++) {
		expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, offset,
						   max_copy_blocks, 0);
		TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
		offset += max_copy_blocks;
	}

	src_offset = bdev->blockcnt - num_blocks;
	offset = 0;

	rc = spdk_bdev_copy_blocks(desc, ioch, offset, src_offset, num_blocks, io_done, NULL);
	CU_ASSERT_EQUAL(rc, 0);
	CU_ASSERT(g_io_done == false);

	while (num_children > 0) {
		num_outstanding = spdk_min(num_children, SPDK_BDEV_MAX_CHILDREN_COPY_REQS);

		/* Each copy request is split into one read and one write request. */
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);
		CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == num_outstanding);
		stub_complete_io(num_outstanding);

		num_children -= num_outstanding;
	}
	CU_ASSERT(g_io_done == true);

	ut_enable_io_type(SPDK_BDEV_IO_TYPE_COPY, true);

	spdk_put_io_channel(ioch);
	spdk_bdev_close(desc);
	free_bdev(bdev);
	ut_fini_bdev();
}

static void
examine_claim_v1(struct spdk_bdev *bdev)
{
	int rc;

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if);
	CU_ASSERT(rc == 0);
}

static void
examine_no_lock_held(struct spdk_bdev *bdev)
{
	CU_ASSERT(!spdk_spin_held(&g_bdev_mgr.spinlock));
	CU_ASSERT(!spdk_spin_held(&bdev->internal.spinlock));
}

struct examine_claim_v2_ctx {
	struct ut_examine_ctx examine_ctx;
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_desc *desc;
};

static void
examine_claim_v2(struct spdk_bdev *bdev)
{
	struct examine_claim_v2_ctx *ctx = bdev->ctxt;
	int rc;

	rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, NULL, &ctx->desc);
	CU_ASSERT(rc == 0);

	rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, &vbdev_ut_if);
	CU_ASSERT(rc == 0);
}

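/*
 * Verify that neither the bdev manager spinlock nor the bdev's own spinlock
 * is held while a module's examine_config() or examine_disk() callback runs,
 * for each of the three paths: no claim, a v1 claim, and a v2 claim.
 */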
static void
examine_locks(void)
{
	struct spdk_bdev *bdev;
	struct ut_examine_ctx ctx = { 0 };
	struct examine_claim_v2_ctx v2_ctx;

	/* Without any claims, one code path is taken */
	ctx.examine_config = examine_no_lock_held;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise another path that is taken when examine_config() takes a v1 claim. */
	memset(&ctx, 0, sizeof(ctx));
	ctx.examine_config = examine_claim_v1;
	ctx.examine_disk = examine_no_lock_held;
	bdev = allocate_bdev_ctx("bdev0", &ctx);
	CU_ASSERT(ctx.examine_config_count == 1);
	CU_ASSERT(ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module == &vbdev_ut_if);
	spdk_bdev_module_release_bdev(bdev);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	free_bdev(bdev);

	/* Exercise the final path that comes with v2 claims. */
	memset(&v2_ctx, 0, sizeof(v2_ctx));
	v2_ctx.examine_ctx.examine_config = examine_claim_v2;
	v2_ctx.examine_ctx.examine_disk = examine_no_lock_held;
	v2_ctx.claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev_ctx("bdev0", &v2_ctx);
	CU_ASSERT(v2_ctx.examine_ctx.examine_config_count == 1);
	CU_ASSERT(v2_ctx.examine_ctx.examine_disk_count == 1);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	spdk_bdev_close(v2_ctx.desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	free_bdev(bdev);
}

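/* Walk the bdev's v2 claim list and assert that it holds the expected number of claims. */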
#define UT_ASSERT_CLAIM_V2_COUNT(bdev, expect) \
	do { \
		uint32_t len = 0; \
		struct spdk_bdev_module_claim *claim; \
		TAILQ_FOREACH(claim, &bdev->internal.claim.v2.claims, link) { \
			len++; \
		} \
		CU_ASSERT(len == expect); \
	} while (0)

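/*
 * SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE: a single read-write descriptor holds
 * the claim; additional read-only opens are allowed, but no further claims of
 * any flavor and no new writers. A minimal sketch of how a module takes and
 * releases such a claim (assuming a read-write descriptor `desc` opened by a
 * hypothetical module `my_module`):
 *
 *	rc = spdk_bdev_module_claim_bdev_desc(desc,
 *			SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL, &my_module);
 *	...
 *	spdk_bdev_close(desc);	// closing the descriptor drops the claim
 */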
static void
claim_v2_rwo(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above changed the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Closing the first descriptor now allows a new claim and it is promoted to rw. */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->claim != NULL);
	CU_ASSERT(desc2->write);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}

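/*
 * SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE: claims may only be taken with
 * read-only descriptors, several such claims may coexist on one bdev, and
 * writers are blocked for as long as any claim remains.
 */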
static void
claim_v2_rom(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Release the claim by closing the descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* None of the above messed up the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!desc2->write);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim with a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&opts;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);

	/* Cannot claim with a read-write descriptor */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	spdk_bdev_close(desc);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}

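/*
 * SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED: claims require a non-zero
 * shared_claim_key; any descriptor presenting the matching key may join the
 * claim and is promoted to read-write, while mismatched keys are rejected.
 */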
static void
claim_v2_rwm(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	char good_key, bad_key;
	int rc;

	bdev = allocate_bdev("bdev0");

	/* Claim without options should fail */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 0);
	CU_ASSERT(desc->claim == NULL);

	/* Claim with options */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	snprintf(opts.name, sizeof(opts.name), "%s", "claim with options");
	opts.shared_claim_key = (uint64_t)&good_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	SPDK_CU_ASSERT_FATAL(desc->claim != NULL);
	CU_ASSERT(desc->claim->module == &bdev_ut_if);
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	memset(&opts, 0, sizeof(opts));
	CU_ASSERT(strcmp(desc->claim->name, "claim with options") == 0);
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* The claim blocks new writers. */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(desc2 == NULL);

	/* New readers are allowed */
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2 != NULL);
	CU_ASSERT(!desc2->write);

	/* No new v2 RWO claims are allowed */
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 ROM claims are allowed and the descriptor stays read-only. */
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE, NULL,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* No new v1 claims are allowed */
	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);

	/* No new v2 RWM claims are allowed if the key does not match */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&bad_key;
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EPERM);
	CU_ASSERT(!desc2->write);

	/* None of the above messed up the existing claim */
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* New v2 RWM claims are allowed and the descriptor is promoted if the key matches. */
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	opts.shared_claim_key = (uint64_t)&good_key;
	CU_ASSERT(!desc2->write);
	rc = spdk_bdev_module_claim_bdev_desc(desc2, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(desc2->write);
	CU_ASSERT(TAILQ_NEXT(desc->claim, link) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 2);

	/* Claim remains when closing the first descriptor */
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED);
	CU_ASSERT(!TAILQ_EMPTY(&bdev->internal.open_descs));
	CU_ASSERT(TAILQ_FIRST(&bdev->internal.claim.v2.claims) == desc2->claim);
	UT_ASSERT_CLAIM_V2_COUNT(bdev, 1);

	/* Claim removed when closing the other descriptor */
	spdk_bdev_close(desc2);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Cannot claim without a key */
	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	spdk_bdev_claim_opts_init(&opts, sizeof(opts));
	rc = spdk_bdev_module_claim_bdev_desc(desc, SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED, &opts,
					      &bdev_ut_if);
	CU_ASSERT(rc == -EINVAL);
	spdk_bdev_close(desc);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	CU_ASSERT(TAILQ_EMPTY(&bdev->internal.open_descs));

	/* Clean up */
	free_bdev(bdev);
}

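/*
 * While any read-write descriptor is open, no v2 claim can be taken: RWO and
 * RWM attempts fail with -EPERM, and ROM attempts fail with -EINVAL because a
 * ROM claim may only be requested with a read-only descriptor.
 */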
static void
claim_v2_existing_writer(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_desc *desc2;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	desc2 = NULL;
	rc = spdk_bdev_open_ext("bdev0", true, bdev_ut_event_cb, NULL, &desc2);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc2 != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
		rc = spdk_bdev_module_claim_bdev_desc(desc2, type, &opts, &bdev_ut_if);
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE) {
			CU_ASSERT(rc == -EINVAL);
		} else {
			CU_ASSERT(rc == -EPERM);
		}
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_NONE);
	}

	spdk_bdev_close(desc);
	spdk_bdev_close(desc2);

	/* Clean up */
	free_bdev(bdev);
}

static void
claim_v2_existing_v1(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);

	desc = NULL;
	rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);
		CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	}

	spdk_bdev_module_release_bdev(bdev);
	spdk_bdev_close(desc);

	/* Clean up */
	free_bdev(bdev);
}

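/*
 * Legacy (v1) and v2 claims are mutually exclusive: while any v2 claim is
 * held, spdk_bdev_module_claim_bdev() fails with -EPERM, and it succeeds
 * again once the v2 claim is released.
 */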
static void
claim_v1_existing_v2(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_bdev_claim_opts opts;
	enum spdk_bdev_claim_type type;
	enum spdk_bdev_claim_type types[] = {
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED,
		SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE
	};
	size_t i;
	int rc;

	bdev = allocate_bdev("bdev0");

	for (i = 0; i < SPDK_COUNTOF(types); i++) {
		type = types[i];

		desc = NULL;
		rc = spdk_bdev_open_ext("bdev0", false, bdev_ut_event_cb, NULL, &desc);
		CU_ASSERT(rc == 0);
		SPDK_CU_ASSERT_FATAL(desc != NULL);

		/* Get a v2 claim */
		spdk_bdev_claim_opts_init(&opts, sizeof(opts));
		if (type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_SHARED) {
			opts.shared_claim_key = (uint64_t)&opts;
		}
		rc = spdk_bdev_module_claim_bdev_desc(desc, type, &opts, &bdev_ut_if);
		CU_ASSERT(rc == 0);

		/* Fail to get a v1 claim */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == -EPERM);

		spdk_bdev_close(desc);

		/* Now v1 succeeds */
		rc = spdk_bdev_module_claim_bdev(bdev, NULL, &bdev_ut_if);
		CU_ASSERT(rc == 0);
		spdk_bdev_module_release_bdev(bdev);
	}

	/* Clean up */
	free_bdev(bdev);
}

static int ut_examine_claimed_init0(void);
static int ut_examine_claimed_init1(void);
static void ut_examine_claimed_config0(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk0(struct spdk_bdev *bdev);
static void ut_examine_claimed_config1(struct spdk_bdev *bdev);
static void ut_examine_claimed_disk1(struct spdk_bdev *bdev);

#define UT_MAX_EXAMINE_MODS 2
struct spdk_bdev_module examine_claimed_mods[UT_MAX_EXAMINE_MODS] = {
	{
		.name = "vbdev_ut_examine0",
		.module_init = ut_examine_claimed_init0,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config0,
		.examine_disk = ut_examine_claimed_disk0,
	},
	{
		.name = "vbdev_ut_examine1",
		.module_init = ut_examine_claimed_init1,
		.module_fini = vbdev_ut_module_fini,
		.examine_config = ut_examine_claimed_config1,
		.examine_disk = ut_examine_claimed_disk1,
	}
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed0, &examine_claimed_mods[0])
SPDK_BDEV_MODULE_REGISTER(bdev_ut_claimed1, &examine_claimed_mods[1])

struct ut_examine_claimed_ctx {
	uint32_t examine_config_count;
	uint32_t examine_disk_count;

	/* Claim type to take, with these options */
	enum spdk_bdev_claim_type claim_type;
	struct spdk_bdev_claim_opts claim_opts;

	/* Expected return value from spdk_bdev_module_claim_bdev_desc() */
	int expect_claim_err;

	/* Descriptor used for a claim */
	struct spdk_bdev_desc *desc;
} examine_claimed_ctx[UT_MAX_EXAMINE_MODS];

bool ut_testing_examine_claimed;

/*
 * Store the order in which the modules were initialized, since we have no
 * guarantee on the order of execution of the constructors. Modules are
 * examined in reverse order of their initialization.
 */
static int g_ut_examine_claimed_order[UT_MAX_EXAMINE_MODS];

static int
ut_examine_claimed_init(uint32_t modnum)
{
	static int current = UT_MAX_EXAMINE_MODS;

	/* Only do this for the first initialization of the bdev framework */
	if (current == 0) {
		return 0;
	}
	g_ut_examine_claimed_order[modnum] = --current;

	return 0;
}

static int
ut_examine_claimed_init0(void)
{
	return ut_examine_claimed_init(0);
}

static int
ut_examine_claimed_init1(void)
{
	return ut_examine_claimed_init(1);
}

static void
reset_examine_claimed_ctx(void)
{
	struct ut_examine_claimed_ctx *ctx;
	uint32_t i;

	for (i = 0; i < SPDK_COUNTOF(examine_claimed_ctx); i++) {
		ctx = &examine_claimed_ctx[i];
		if (ctx->desc != NULL) {
			spdk_bdev_close(ctx->desc);
		}
		memset(ctx, 0, sizeof(*ctx));
		spdk_bdev_claim_opts_init(&ctx->claim_opts, sizeof(ctx->claim_opts));
	}
}

static void
examine_claimed_config(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];
	int rc;

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_config_count++;

	if (ctx->claim_type != SPDK_BDEV_CLAIM_NONE) {
		rc = spdk_bdev_open_ext(bdev->name, false, bdev_ut_event_cb, &ctx->claim_opts,
					&ctx->desc);
		CU_ASSERT(rc == 0);

		rc = spdk_bdev_module_claim_bdev_desc(ctx->desc, ctx->claim_type, NULL, module);
		CU_ASSERT(rc == ctx->expect_claim_err);
	}
	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_config0(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, g_ut_examine_claimed_order[0]);
}

static void
ut_examine_claimed_config1(struct spdk_bdev *bdev)
{
	examine_claimed_config(bdev, g_ut_examine_claimed_order[1]);
}

static void
examine_claimed_disk(struct spdk_bdev *bdev, uint32_t modnum)
{
	SPDK_CU_ASSERT_FATAL(modnum < UT_MAX_EXAMINE_MODS);
	struct spdk_bdev_module *module = &examine_claimed_mods[modnum];
	struct ut_examine_claimed_ctx *ctx = &examine_claimed_ctx[modnum];

	if (!ut_testing_examine_claimed) {
		spdk_bdev_module_examine_done(module);
		return;
	}

	ctx->examine_disk_count++;

	spdk_bdev_module_examine_done(module);
}

static void
ut_examine_claimed_disk0(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 0);
}

static void
ut_examine_claimed_disk1(struct spdk_bdev *bdev)
{
	examine_claimed_disk(bdev, 1);
}

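/*
 * Drive examine_config()/examine_disk() through the two registered test
 * modules and verify that only the modules which successfully claim the bdev
 * get their examine_disk() callback, and that a losing conflicting claim
 * attempt fails with -EPERM.
 */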
static void
examine_claimed(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev_module *mod = examine_claimed_mods;
	struct ut_examine_claimed_ctx *ctx = examine_claimed_ctx;

	ut_testing_examine_claimed = true;
	reset_examine_claimed_ctx();

	/*
	 * With one module claiming, both modules' examine_config should be called, but only the
	 * claiming module's examine_disk should be called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 0);
	CU_ASSERT(ctx[1].desc == NULL);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * With two modules claiming, both modules' examine_config and examine_disk should be
	 * called.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[0].desc != NULL);
	CU_ASSERT(ctx[0].desc->claim->module == &mod[0]);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	/*
	 * If two vbdev modules try to claim with conflicting claim types, the module that was
	 * added last wins. The winner gets the claim and is the only one that has its
	 * examine_disk callback invoked.
	 */
	ctx[0].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_NONE;
	ctx[0].expect_claim_err = -EPERM;
	ctx[1].claim_type = SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE;
	bdev = allocate_bdev("bdev0");
	CU_ASSERT(ctx[0].examine_config_count == 1);
	CU_ASSERT(ctx[0].examine_disk_count == 0);
	CU_ASSERT(ctx[1].examine_config_count == 1);
	CU_ASSERT(ctx[1].examine_disk_count == 1);
	SPDK_CU_ASSERT_FATAL(ctx[1].desc != NULL);
	CU_ASSERT(ctx[1].desc->claim->module == &mod[1]);
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_READ_MANY_WRITE_ONE);
	reset_examine_claimed_ctx();
	free_bdev(bdev);

	ut_testing_examine_claimed = false;
}

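/*
 * spdk_bdev_get_numa_id() reports the bdev's NUMA node only when numa.id_valid
 * is set; otherwise (or when the stored id is SPDK_ENV_NUMA_ID_ANY) it returns
 * SPDK_ENV_NUMA_ID_ANY.
 */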
static void
get_numa_id(void)
{
	struct spdk_bdev bdev = {};

	bdev.numa.id = 0;
	bdev.numa.id_valid = 0;
	CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY);

	bdev.numa.id_valid = 1;
	CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == 0);

	bdev.numa.id = SPDK_ENV_NUMA_ID_ANY;
	CU_ASSERT(spdk_bdev_get_numa_id(&bdev) == SPDK_ENV_NUMA_ID_ANY);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("bdev", ut_bdev_setup, ut_bdev_teardown);

	CU_ADD_TEST(suite, bytes_to_blocks_test);
	CU_ADD_TEST(suite, num_blocks_test);
	CU_ADD_TEST(suite, io_valid_test);
	CU_ADD_TEST(suite, open_write_test);
	CU_ADD_TEST(suite, claim_test);
	CU_ADD_TEST(suite, alias_add_del_test);
	CU_ADD_TEST(suite, get_device_stat_test);
	CU_ADD_TEST(suite, bdev_io_types_test);
	CU_ADD_TEST(suite, bdev_io_wait_test);
	CU_ADD_TEST(suite, bdev_io_spans_split_test);
	CU_ADD_TEST(suite, bdev_io_boundary_split_test);
	CU_ADD_TEST(suite, bdev_io_max_size_and_segment_split_test);
	CU_ADD_TEST(suite, bdev_io_mix_split_test);
	CU_ADD_TEST(suite, bdev_io_split_with_io_wait);
	CU_ADD_TEST(suite, bdev_io_write_unit_split_test);
	CU_ADD_TEST(suite, bdev_io_alignment_with_boundary);
	CU_ADD_TEST(suite, bdev_io_alignment);
	CU_ADD_TEST(suite, bdev_histograms);
	CU_ADD_TEST(suite, bdev_write_zeroes);
	CU_ADD_TEST(suite, bdev_compare_and_write);
	CU_ADD_TEST(suite, bdev_compare);
	CU_ADD_TEST(suite, bdev_compare_emulated);
	CU_ADD_TEST(suite, bdev_zcopy_write);
	CU_ADD_TEST(suite, bdev_zcopy_read);
	CU_ADD_TEST(suite, bdev_open_while_hotremove);
	CU_ADD_TEST(suite, bdev_close_while_hotremove);
	CU_ADD_TEST(suite, bdev_open_ext_test);
	CU_ADD_TEST(suite, bdev_open_ext_unregister);
	CU_ADD_TEST(suite, bdev_set_io_timeout);
	CU_ADD_TEST(suite, bdev_set_qd_sampling);
	CU_ADD_TEST(suite, lba_range_overlap);
	CU_ADD_TEST(suite, lock_lba_range_check_ranges);
	CU_ADD_TEST(suite, lock_lba_range_with_io_outstanding);
	CU_ADD_TEST(suite, lock_lba_range_overlapped);
	CU_ADD_TEST(suite, bdev_quiesce);
	CU_ADD_TEST(suite, bdev_io_abort);
	CU_ADD_TEST(suite, bdev_unmap);
	CU_ADD_TEST(suite, bdev_write_zeroes_split_test);
	CU_ADD_TEST(suite, bdev_set_options_test);
	CU_ADD_TEST(suite, bdev_get_memory_domains);
	CU_ADD_TEST(suite, bdev_io_ext);
	CU_ADD_TEST(suite, bdev_io_ext_no_opts);
	CU_ADD_TEST(suite, bdev_io_ext_invalid_opts);
	CU_ADD_TEST(suite, bdev_io_ext_split);
	CU_ADD_TEST(suite, bdev_io_ext_bounce_buffer);
	CU_ADD_TEST(suite, bdev_register_uuid_alias);
	CU_ADD_TEST(suite, bdev_unregister_by_name);
	CU_ADD_TEST(suite, for_each_bdev_test);
	CU_ADD_TEST(suite, bdev_seek_test);
	CU_ADD_TEST(suite, bdev_copy);
	CU_ADD_TEST(suite, bdev_copy_split_test);
	CU_ADD_TEST(suite, examine_locks);
	CU_ADD_TEST(suite, claim_v2_rwo);
	CU_ADD_TEST(suite, claim_v2_rom);
	CU_ADD_TEST(suite, claim_v2_rwm);
	CU_ADD_TEST(suite, claim_v2_existing_writer);
	CU_ADD_TEST(suite, claim_v2_existing_v1);
	CU_ADD_TEST(suite, claim_v1_existing_v2);
	CU_ADD_TEST(suite, examine_claimed);
	CU_ADD_TEST(suite, get_numa_id);

	allocate_cores(1);
	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();
	free_cores();

	return num_failures;
}