1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2018 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/env.h" 10 #include "spdk_internal/mock.h" 11 #include "thread/thread_internal.h" 12 #include "bdev/raid/bdev_raid.c" 13 #include "bdev/raid/bdev_raid_rpc.c" 14 #include "bdev/raid/raid0.c" 15 #include "common/lib/ut_multithread.c" 16 17 #define MAX_BASE_DRIVES 32 18 #define MAX_RAIDS 2 19 #define INVALID_IO_SUBMIT 0xFFFF 20 #define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5)) 21 #define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul) 22 #define MD_SIZE 8 23 24 struct spdk_bdev_channel { 25 struct spdk_io_channel *channel; 26 }; 27 28 struct spdk_bdev_desc { 29 struct spdk_bdev *bdev; 30 }; 31 32 /* Data structure to capture the output of IO for verification */ 33 struct io_output { 34 struct spdk_bdev_desc *desc; 35 struct spdk_io_channel *ch; 36 uint64_t offset_blocks; 37 uint64_t num_blocks; 38 spdk_bdev_io_completion_cb cb; 39 void *cb_arg; 40 enum spdk_bdev_io_type iotype; 41 struct iovec *iovs; 42 int iovcnt; 43 void *md_buf; 44 }; 45 46 struct raid_io_ranges { 47 uint64_t lba; 48 uint64_t nblocks; 49 }; 50 51 /* Globals */ 52 int g_bdev_io_submit_status; 53 struct io_output *g_io_output = NULL; 54 uint32_t g_io_output_index; 55 uint32_t g_io_comp_status; 56 bool g_child_io_status_flag; 57 void *g_rpc_req; 58 uint32_t g_rpc_req_size; 59 TAILQ_HEAD(bdev, spdk_bdev); 60 struct bdev g_bdev_list; 61 TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry); 62 struct waitq g_io_waitq; 63 uint32_t g_block_len; 64 uint32_t g_strip_size; 65 uint32_t g_max_io_size; 66 uint8_t g_max_base_drives; 67 uint8_t g_max_raids; 68 uint8_t g_ignore_io_output; 69 uint8_t g_rpc_err; 70 char *g_get_raids_output[MAX_RAIDS]; 71 uint32_t g_get_raids_count; 72 uint8_t g_json_decode_obj_err; 73 uint8_t g_json_decode_obj_create; 74 uint8_t g_config_level_create = 0; 75 uint8_t g_test_multi_raids; 76 struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE]; 77 uint32_t g_io_range_idx; 78 uint64_t g_lba_offset; 79 uint64_t g_bdev_ch_io_device; 80 bool g_bdev_io_defer_completion; 81 TAILQ_HEAD(, spdk_bdev_io) g_deferred_ios = TAILQ_HEAD_INITIALIZER(g_deferred_ios); 82 bool g_enable_dif; 83 84 DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module)); 85 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 86 DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev, 87 enum spdk_bdev_io_type io_type), true); 88 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 89 DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 90 uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb, 91 void *cb_arg), 0); 92 DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp), 93 NULL); 94 DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func, 95 uint32_t state_mask)); 96 DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias)); 97 DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request, 98 struct spdk_json_write_ctx *w)); 99 DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request, 100 bool value)); 101 DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, 
void *out), 0); 102 DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0); 103 DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0); 104 DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values, 105 spdk_json_decode_fn decode_func, 106 void *out, size_t max_size, size_t *out_size, size_t stride), 0); 107 DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0); 108 DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0); 109 DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0); 110 DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w, 111 const char *name), 0); 112 DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0); 113 DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0); 114 DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0); 115 DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0); 116 DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w, 117 const char *name), 0); 118 DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0); 119 DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0); 120 DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name, 121 uint64_t val), 0); 122 DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL); 123 DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch, 124 struct spdk_bdev_io_wait_entry *entry), 0); 125 DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev, 126 struct spdk_memory_domain **domains, int array_size), 0); 127 DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev"); 128 DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false); 129 DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0); 130 DEFINE_STUB(spdk_json_write_named_uuid, int, (struct spdk_json_write_ctx *w, const char *name, 131 const struct spdk_uuid *val), 0); 132 DEFINE_STUB_V(raid_bdev_init_superblock, (struct raid_bdev *raid_bdev)); 133 DEFINE_STUB(raid_bdev_alloc_superblock, int, (struct raid_bdev *raid_bdev, uint32_t block_size), 0); 134 DEFINE_STUB_V(raid_bdev_free_superblock, (struct raid_bdev *raid_bdev)); 135 136 137 uint32_t 138 spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev) 139 { 140 return g_block_len; 141 } 142 143 typedef enum spdk_dif_type spdk_dif_type_t; 144 145 spdk_dif_type_t 146 spdk_bdev_get_dif_type(const struct spdk_bdev *bdev) 147 { 148 if (bdev->md_len != 0) { 149 return bdev->dif_type; 150 } else { 151 return SPDK_DIF_DISABLE; 152 } 153 } 154 155 bool 156 spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev) 157 { 158 return (bdev->md_len != 0) && bdev->md_interleave; 159 } 160 161 bool 162 spdk_bdev_is_md_separate(const struct spdk_bdev *bdev) 163 { 164 return (bdev->md_len != 0) && !bdev->md_interleave; 165 } 166 167 uint32_t 168 spdk_bdev_get_md_size(const struct spdk_bdev *bdev) 169 { 170 return bdev->md_len; 171 } 172 173 uint32_t 174 spdk_bdev_get_block_size(const struct spdk_bdev *bdev) 175 { 176 return bdev->blocklen; 177 } 178 179 int 180 raid_bdev_load_base_bdev_superblock(struct spdk_bdev_desc *desc, 
struct spdk_io_channel *ch, 181 raid_bdev_load_sb_cb cb, void *cb_ctx) 182 { 183 cb(NULL, -EINVAL, cb_ctx); 184 185 return 0; 186 } 187 188 void 189 raid_bdev_write_superblock(struct raid_bdev *raid_bdev, raid_bdev_write_sb_cb cb, void *cb_ctx) 190 { 191 cb(0, raid_bdev, cb_ctx); 192 } 193 194 const struct spdk_uuid * 195 spdk_bdev_get_uuid(const struct spdk_bdev *bdev) 196 { 197 return &bdev->uuid; 198 } 199 200 struct spdk_io_channel * 201 spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc) 202 { 203 return spdk_get_io_channel(&g_bdev_ch_io_device); 204 } 205 206 static int 207 set_test_opts(void) 208 { 209 210 g_max_base_drives = MAX_BASE_DRIVES; 211 g_max_raids = MAX_RAIDS; 212 g_block_len = 4096; 213 g_strip_size = 64; 214 g_max_io_size = 1024; 215 g_enable_dif = false; 216 217 printf("Test Options\n"); 218 printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, " 219 "g_max_raids = %u, g_enable_dif = %d\n", 220 g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids, 221 g_enable_dif); 222 223 return 0; 224 } 225 226 static int 227 set_test_opts_dif(void) 228 { 229 230 g_max_base_drives = MAX_BASE_DRIVES; 231 g_max_raids = MAX_RAIDS; 232 g_block_len = 4096; 233 g_strip_size = 64; 234 g_max_io_size = 1024; 235 g_enable_dif = true; 236 237 printf("Test Options\n"); 238 printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, " 239 "g_max_raids = %u, g_enable_dif = %d\n", 240 g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids, 241 g_enable_dif); 242 243 return 0; 244 } 245 246 /* Set globals before every test run */ 247 static void 248 set_globals(void) 249 { 250 uint32_t max_splits; 251 252 g_bdev_io_submit_status = 0; 253 if (g_max_io_size < g_strip_size) { 254 max_splits = 2; 255 } else { 256 max_splits = (g_max_io_size / g_strip_size) + 1; 257 } 258 if (max_splits < g_max_base_drives) { 259 max_splits = g_max_base_drives; 260 } 261 262 g_io_output = calloc(max_splits, sizeof(struct io_output)); 263 SPDK_CU_ASSERT_FATAL(g_io_output != NULL); 264 g_io_output_index = 0; 265 memset(g_get_raids_output, 0, sizeof(g_get_raids_output)); 266 g_get_raids_count = 0; 267 g_io_comp_status = 0; 268 g_ignore_io_output = 0; 269 g_config_level_create = 0; 270 g_rpc_err = 0; 271 g_test_multi_raids = 0; 272 g_child_io_status_flag = true; 273 TAILQ_INIT(&g_bdev_list); 274 TAILQ_INIT(&g_io_waitq); 275 g_rpc_req = NULL; 276 g_rpc_req_size = 0; 277 g_json_decode_obj_err = 0; 278 g_json_decode_obj_create = 0; 279 g_lba_offset = 0; 280 g_bdev_io_defer_completion = false; 281 } 282 283 static void 284 base_bdevs_cleanup(void) 285 { 286 struct spdk_bdev *bdev; 287 struct spdk_bdev *bdev_next; 288 289 if (!TAILQ_EMPTY(&g_bdev_list)) { 290 TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) { 291 free(bdev->name); 292 TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); 293 free(bdev); 294 } 295 } 296 } 297 298 static void 299 check_and_remove_raid_bdev(struct raid_bdev *raid_bdev) 300 { 301 struct raid_base_bdev_info *base_info; 302 303 assert(raid_bdev != NULL); 304 assert(raid_bdev->base_bdev_info != NULL); 305 306 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) { 307 if (base_info->desc) { 308 raid_bdev_free_base_bdev_resource(base_info); 309 } 310 } 311 assert(raid_bdev->num_base_bdevs_discovered == 0); 312 raid_bdev_cleanup_and_free(raid_bdev); 313 } 314 315 /* Reset globals */ 316 static void 317 reset_globals(void) 318 { 319 if (g_io_output) { 320 free(g_io_output); 321 g_io_output = NULL; 322 } 323 
g_rpc_req = NULL; 324 g_rpc_req_size = 0; 325 } 326 327 void 328 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, 329 uint64_t len) 330 { 331 cb(bdev_io->internal.ch->channel, bdev_io, true); 332 } 333 334 static void 335 generate_dif(struct iovec *iovs, int iovcnt, void *md_buf, 336 uint64_t offset_blocks, uint32_t num_blocks, struct spdk_bdev *bdev) 337 { 338 struct spdk_dif_ctx dif_ctx; 339 int rc; 340 struct spdk_dif_ctx_init_ext_opts dif_opts; 341 spdk_dif_type_t dif_type; 342 bool md_interleaved; 343 struct iovec md_iov; 344 345 dif_type = spdk_bdev_get_dif_type(bdev); 346 md_interleaved = spdk_bdev_is_md_interleaved(bdev); 347 348 if (dif_type == SPDK_DIF_DISABLE) { 349 return; 350 } 351 352 dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format); 353 dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16; 354 rc = spdk_dif_ctx_init(&dif_ctx, 355 spdk_bdev_get_block_size(bdev), 356 spdk_bdev_get_md_size(bdev), 357 md_interleaved, 358 spdk_bdev_is_dif_head_of_md(bdev), 359 dif_type, 360 bdev->dif_check_flags, 361 offset_blocks, 362 0xFFFF, 0x123, 0, 0, &dif_opts); 363 SPDK_CU_ASSERT_FATAL(rc == 0); 364 365 if (!md_interleaved) { 366 md_iov.iov_base = md_buf; 367 md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks; 368 369 rc = spdk_dix_generate(iovs, iovcnt, &md_iov, num_blocks, &dif_ctx); 370 SPDK_CU_ASSERT_FATAL(rc == 0); 371 } 372 } 373 374 static void 375 verify_dif(struct iovec *iovs, int iovcnt, void *md_buf, 376 uint64_t offset_blocks, uint32_t num_blocks, struct spdk_bdev *bdev) 377 { 378 struct spdk_dif_ctx dif_ctx; 379 int rc; 380 struct spdk_dif_ctx_init_ext_opts dif_opts; 381 struct spdk_dif_error errblk; 382 spdk_dif_type_t dif_type; 383 bool md_interleaved; 384 struct iovec md_iov; 385 386 dif_type = spdk_bdev_get_dif_type(bdev); 387 md_interleaved = spdk_bdev_is_md_interleaved(bdev); 388 389 if (dif_type == SPDK_DIF_DISABLE) { 390 return; 391 } 392 393 dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format); 394 dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16; 395 rc = spdk_dif_ctx_init(&dif_ctx, 396 spdk_bdev_get_block_size(bdev), 397 spdk_bdev_get_md_size(bdev), 398 md_interleaved, 399 spdk_bdev_is_dif_head_of_md(bdev), 400 dif_type, 401 bdev->dif_check_flags, 402 offset_blocks, 403 0xFFFF, 0x123, 0, 0, &dif_opts); 404 SPDK_CU_ASSERT_FATAL(rc == 0); 405 406 if (!md_interleaved) { 407 md_iov.iov_base = md_buf; 408 md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks; 409 410 rc = spdk_dix_verify(iovs, iovcnt, 411 &md_iov, num_blocks, &dif_ctx, &errblk); 412 SPDK_CU_ASSERT_FATAL(rc == 0); 413 } 414 } 415 416 /* Store the IO completion status in global variable to verify by various tests */ 417 void 418 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 419 { 420 g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? 
true : false); 421 422 if (g_io_comp_status && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) { 423 verify_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf, 424 bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev); 425 } 426 } 427 428 static void 429 set_io_output(struct io_output *output, 430 struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 431 uint64_t offset_blocks, uint64_t num_blocks, 432 spdk_bdev_io_completion_cb cb, void *cb_arg, 433 enum spdk_bdev_io_type iotype, struct iovec *iovs, 434 int iovcnt, void *md) 435 { 436 output->desc = desc; 437 output->ch = ch; 438 output->offset_blocks = offset_blocks; 439 output->num_blocks = num_blocks; 440 output->cb = cb; 441 output->cb_arg = cb_arg; 442 output->iotype = iotype; 443 output->iovs = iovs; 444 output->iovcnt = iovcnt; 445 output->md_buf = md; 446 } 447 448 static void 449 child_io_complete(struct spdk_bdev_io *child_io, spdk_bdev_io_completion_cb cb, void *cb_arg) 450 { 451 if (g_bdev_io_defer_completion) { 452 child_io->internal.cb = cb; 453 child_io->internal.caller_ctx = cb_arg; 454 TAILQ_INSERT_TAIL(&g_deferred_ios, child_io, internal.link); 455 } else { 456 cb(child_io, g_child_io_status_flag, cb_arg); 457 } 458 } 459 460 static void 461 complete_deferred_ios(void) 462 { 463 struct spdk_bdev_io *child_io, *tmp; 464 465 TAILQ_FOREACH_SAFE(child_io, &g_deferred_ios, internal.link, tmp) { 466 TAILQ_REMOVE(&g_deferred_ios, child_io, internal.link); 467 child_io->internal.cb(child_io, g_child_io_status_flag, child_io->internal.caller_ctx); 468 } 469 } 470 471 /* It will cache the split IOs for verification */ 472 int 473 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 474 struct iovec *iov, int iovcnt, 475 uint64_t offset_blocks, uint64_t num_blocks, 476 spdk_bdev_io_completion_cb cb, void *cb_arg) 477 { 478 return spdk_bdev_writev_blocks_ext(desc, ch, iov, iovcnt, offset_blocks, 479 num_blocks, cb, cb_arg, NULL); 480 } 481 482 int 483 spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 484 struct iovec *iov, int iovcnt, 485 uint64_t offset_blocks, uint64_t num_blocks, 486 spdk_bdev_io_completion_cb cb, void *cb_arg, 487 struct spdk_bdev_ext_io_opts *opts) 488 { 489 struct io_output *output = &g_io_output[g_io_output_index]; 490 struct spdk_bdev_io *child_io; 491 492 if (g_ignore_io_output) { 493 return 0; 494 } 495 496 if (g_max_io_size < g_strip_size) { 497 SPDK_CU_ASSERT_FATAL(g_io_output_index < 2); 498 } else { 499 SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1); 500 } 501 if (g_bdev_io_submit_status == 0) { 502 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 503 SPDK_BDEV_IO_TYPE_WRITE, iov, iovcnt, opts->metadata); 504 g_io_output_index++; 505 506 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 507 SPDK_CU_ASSERT_FATAL(child_io != NULL); 508 child_io_complete(child_io, cb, cb_arg); 509 } 510 511 return g_bdev_io_submit_status; 512 } 513 514 int 515 spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 516 struct iovec *iov, int iovcnt, void *md, 517 uint64_t offset_blocks, uint64_t num_blocks, 518 spdk_bdev_io_completion_cb cb, void *cb_arg) 519 { 520 struct spdk_bdev_ext_io_opts opts = { 521 .metadata = md 522 }; 523 524 return spdk_bdev_writev_blocks_ext(desc, ch, iov, iovcnt, offset_blocks, 525 num_blocks, cb, cb_arg, &opts); 526 } 527 528 int 529 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 
530 spdk_bdev_io_completion_cb cb, void *cb_arg) 531 { 532 struct io_output *output = &g_io_output[g_io_output_index]; 533 struct spdk_bdev_io *child_io; 534 535 if (g_ignore_io_output) { 536 return 0; 537 } 538 539 if (g_bdev_io_submit_status == 0) { 540 set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET, 541 NULL, 0, NULL); 542 g_io_output_index++; 543 544 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 545 SPDK_CU_ASSERT_FATAL(child_io != NULL); 546 child_io_complete(child_io, cb, cb_arg); 547 } 548 549 return g_bdev_io_submit_status; 550 } 551 552 int 553 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 554 uint64_t offset_blocks, uint64_t num_blocks, 555 spdk_bdev_io_completion_cb cb, void *cb_arg) 556 { 557 struct io_output *output = &g_io_output[g_io_output_index]; 558 struct spdk_bdev_io *child_io; 559 560 if (g_ignore_io_output) { 561 return 0; 562 } 563 564 if (g_bdev_io_submit_status == 0) { 565 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 566 SPDK_BDEV_IO_TYPE_UNMAP, NULL, 0, NULL); 567 g_io_output_index++; 568 569 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 570 SPDK_CU_ASSERT_FATAL(child_io != NULL); 571 child_io_complete(child_io, cb, cb_arg); 572 } 573 574 return g_bdev_io_submit_status; 575 } 576 577 void 578 spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno) 579 { 580 CU_ASSERT(bdeverrno == 0); 581 SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL); 582 bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno); 583 } 584 585 int 586 spdk_bdev_register(struct spdk_bdev *bdev) 587 { 588 TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link); 589 return 0; 590 } 591 592 void 593 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 594 { 595 int ret; 596 597 SPDK_CU_ASSERT_FATAL(spdk_bdev_get_by_name(bdev->name) == bdev); 598 TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); 599 600 bdev->internal.unregister_cb = cb_fn; 601 bdev->internal.unregister_ctx = cb_arg; 602 603 ret = bdev->fn_table->destruct(bdev->ctxt); 604 CU_ASSERT(ret == 1); 605 606 poll_threads(); 607 } 608 609 int 610 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb, 611 void *event_ctx, struct spdk_bdev_desc **_desc) 612 { 613 struct spdk_bdev *bdev; 614 615 bdev = spdk_bdev_get_by_name(bdev_name); 616 if (bdev == NULL) { 617 return -ENODEV; 618 } 619 620 *_desc = (void *)bdev; 621 return 0; 622 } 623 624 struct spdk_bdev * 625 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 626 { 627 return (void *)desc; 628 } 629 630 int 631 spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val) 632 { 633 if (!g_test_multi_raids) { 634 struct rpc_bdev_raid_create *req = g_rpc_req; 635 if (strcmp(name, "strip_size_kb") == 0) { 636 CU_ASSERT(req->strip_size_kb == val); 637 } else if (strcmp(name, "blocklen_shift") == 0) { 638 CU_ASSERT(spdk_u32log2(g_block_len) == val); 639 } else if (strcmp(name, "num_base_bdevs") == 0) { 640 CU_ASSERT(req->base_bdevs.num_base_bdevs == val); 641 } else if (strcmp(name, "state") == 0) { 642 CU_ASSERT(val == RAID_BDEV_STATE_ONLINE); 643 } else if (strcmp(name, "destruct_called") == 0) { 644 CU_ASSERT(val == 0); 645 } else if (strcmp(name, "num_base_bdevs_discovered") == 0) { 646 CU_ASSERT(req->base_bdevs.num_base_bdevs == val); 647 } 648 } 649 return 0; 650 } 651 652 int 653 spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char 
*val) 654 { 655 if (g_test_multi_raids) { 656 if (strcmp(name, "name") == 0) { 657 g_get_raids_output[g_get_raids_count] = strdup(val); 658 SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL); 659 g_get_raids_count++; 660 } 661 } else { 662 struct rpc_bdev_raid_create *req = g_rpc_req; 663 if (strcmp(name, "raid_level") == 0) { 664 CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0); 665 } 666 } 667 return 0; 668 } 669 670 int 671 spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val) 672 { 673 if (!g_test_multi_raids) { 674 struct rpc_bdev_raid_create *req = g_rpc_req; 675 if (strcmp(name, "superblock") == 0) { 676 CU_ASSERT(val == req->superblock_enabled); 677 } 678 } 679 return 0; 680 } 681 682 void 683 spdk_bdev_free_io(struct spdk_bdev_io *bdev_io) 684 { 685 if (bdev_io) { 686 free(bdev_io); 687 } 688 } 689 690 /* It will cache split IOs for verification */ 691 int 692 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 693 struct iovec *iov, int iovcnt, 694 uint64_t offset_blocks, uint64_t num_blocks, 695 spdk_bdev_io_completion_cb cb, void *cb_arg) 696 { 697 return spdk_bdev_readv_blocks_ext(desc, ch, iov, iovcnt, offset_blocks, 698 num_blocks, cb, cb_arg, NULL); 699 } 700 701 int 702 spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 703 struct iovec *iov, int iovcnt, 704 uint64_t offset_blocks, uint64_t num_blocks, 705 spdk_bdev_io_completion_cb cb, void *cb_arg, 706 struct spdk_bdev_ext_io_opts *opts) 707 { 708 struct io_output *output = &g_io_output[g_io_output_index]; 709 struct spdk_bdev_io *child_io; 710 711 if (g_ignore_io_output) { 712 return 0; 713 } 714 715 SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1); 716 if (g_bdev_io_submit_status == 0) { 717 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 718 SPDK_BDEV_IO_TYPE_READ, iov, iovcnt, opts->metadata); 719 generate_dif(iov, iovcnt, opts->metadata, offset_blocks, num_blocks, 720 spdk_bdev_desc_get_bdev(desc)); 721 g_io_output_index++; 722 723 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 724 SPDK_CU_ASSERT_FATAL(child_io != NULL); 725 child_io_complete(child_io, cb, cb_arg); 726 } 727 728 return g_bdev_io_submit_status; 729 } 730 731 int 732 spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 733 struct iovec *iov, int iovcnt, void *md, 734 uint64_t offset_blocks, uint64_t num_blocks, 735 spdk_bdev_io_completion_cb cb, void *cb_arg) 736 { 737 struct spdk_bdev_ext_io_opts opts = { 738 .metadata = md 739 }; 740 741 return spdk_bdev_readv_blocks_ext(desc, ch, iov, iovcnt, offset_blocks, 742 num_blocks, cb, cb_arg, &opts); 743 } 744 745 746 void 747 spdk_bdev_module_release_bdev(struct spdk_bdev *bdev) 748 { 749 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 750 CU_ASSERT(bdev->internal.claim.v1.module != NULL); 751 bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE; 752 bdev->internal.claim.v1.module = NULL; 753 } 754 755 int 756 spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 757 struct spdk_bdev_module *module) 758 { 759 if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) { 760 CU_ASSERT(bdev->internal.claim.v1.module != NULL); 761 return -1; 762 } 763 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 764 bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE; 765 bdev->internal.claim.v1.module = module; 766 return 0; 767 } 768 769 int 770 
spdk_json_decode_object(const struct spdk_json_val *values, 771 const struct spdk_json_object_decoder *decoders, size_t num_decoders, 772 void *out) 773 { 774 struct rpc_bdev_raid_create *req, *_out; 775 size_t i; 776 777 if (g_json_decode_obj_err) { 778 return -1; 779 } else if (g_json_decode_obj_create) { 780 req = g_rpc_req; 781 _out = out; 782 783 _out->name = strdup(req->name); 784 SPDK_CU_ASSERT_FATAL(_out->name != NULL); 785 _out->strip_size_kb = req->strip_size_kb; 786 _out->level = req->level; 787 _out->superblock_enabled = req->superblock_enabled; 788 _out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs; 789 for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) { 790 _out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]); 791 SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]); 792 } 793 } else { 794 memcpy(out, g_rpc_req, g_rpc_req_size); 795 } 796 797 return 0; 798 } 799 800 struct spdk_json_write_ctx * 801 spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request) 802 { 803 return (void *)1; 804 } 805 806 void 807 spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request, 808 int error_code, const char *msg) 809 { 810 g_rpc_err = 1; 811 } 812 813 void 814 spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request, 815 int error_code, const char *fmt, ...) 816 { 817 g_rpc_err = 1; 818 } 819 820 struct spdk_bdev * 821 spdk_bdev_get_by_name(const char *bdev_name) 822 { 823 struct spdk_bdev *bdev; 824 825 if (!TAILQ_EMPTY(&g_bdev_list)) { 826 TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) { 827 if (strcmp(bdev_name, bdev->name) == 0) { 828 return bdev; 829 } 830 } 831 } 832 833 return NULL; 834 } 835 836 int 837 spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 838 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 839 { 840 if (cb_fn) { 841 cb_fn(cb_arg, 0); 842 } 843 844 return 0; 845 } 846 847 int 848 spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 849 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 850 { 851 if (cb_fn) { 852 cb_fn(cb_arg, 0); 853 } 854 855 return 0; 856 } 857 858 int 859 spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 860 uint64_t offset, uint64_t length, 861 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 862 { 863 if (cb_fn) { 864 cb_fn(cb_arg, 0); 865 } 866 867 return 0; 868 } 869 870 int 871 spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 872 uint64_t offset, uint64_t length, 873 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 874 { 875 if (cb_fn) { 876 cb_fn(cb_arg, 0); 877 } 878 879 return 0; 880 } 881 882 static void 883 bdev_io_cleanup(struct spdk_bdev_io *bdev_io) 884 { 885 if (bdev_io->u.bdev.iovs) { 886 int i; 887 888 for (i = 0; i < bdev_io->u.bdev.iovcnt; i++) { 889 free(bdev_io->u.bdev.iovs[i].iov_base); 890 } 891 free(bdev_io->u.bdev.iovs); 892 } 893 894 free(bdev_io->u.bdev.md_buf); 895 free(bdev_io); 896 } 897 898 static void 899 _bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, 900 struct spdk_bdev *bdev, uint64_t lba, uint64_t blocks, int16_t iotype, 901 int iovcnt, size_t iov_len) 902 { 903 struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch); 904 int i; 905 906 bdev_io->bdev = bdev; 907 bdev_io->u.bdev.offset_blocks = lba; 908 bdev_io->u.bdev.num_blocks = blocks; 909 bdev_io->type = iotype; 910 bdev_io->internal.ch = channel; 911 bdev_io->u.bdev.iovcnt = iovcnt; 912 913 if (iovcnt == 0) { 914 bdev_io->u.bdev.iovs = NULL; 915 bdev_io->u.bdev.md_buf 
= NULL; 916 return; 917 } 918 919 SPDK_CU_ASSERT_FATAL(iov_len * iovcnt == blocks * g_block_len); 920 921 bdev_io->u.bdev.iovs = calloc(iovcnt, sizeof(struct iovec)); 922 SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL); 923 924 for (i = 0; i < iovcnt; i++) { 925 struct iovec *iov = &bdev_io->u.bdev.iovs[i]; 926 927 iov->iov_base = calloc(1, iov_len); 928 SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL); 929 iov->iov_len = iov_len; 930 } 931 932 if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE && !spdk_bdev_is_md_interleaved(bdev)) { 933 bdev_io->u.bdev.md_buf = calloc(1, blocks * spdk_bdev_get_md_size(bdev)); 934 } 935 } 936 937 static void 938 bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev, 939 uint64_t lba, uint64_t blocks, int16_t iotype) 940 { 941 int iovcnt; 942 size_t iov_len; 943 944 if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) { 945 iovcnt = 0; 946 iov_len = 0; 947 } else { 948 iovcnt = 1; 949 iov_len = blocks * g_block_len; 950 } 951 952 _bdev_io_initialize(bdev_io, ch, bdev, lba, blocks, iotype, iovcnt, iov_len); 953 } 954 955 static void 956 verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 957 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status) 958 { 959 uint8_t index = 0; 960 struct io_output *output; 961 962 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 963 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 964 SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT); 965 SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL); 966 967 CU_ASSERT(g_io_output_index == num_base_drives); 968 for (index = 0; index < g_io_output_index; index++) { 969 output = &g_io_output[index]; 970 CU_ASSERT(ch_ctx->base_channel[index] == output->ch); 971 CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc); 972 CU_ASSERT(bdev_io->type == output->iotype); 973 } 974 CU_ASSERT(g_io_comp_status == io_status); 975 } 976 977 static void 978 verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 979 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status) 980 { 981 uint32_t strip_shift = spdk_u32log2(g_strip_size); 982 uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift; 983 uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >> 984 strip_shift; 985 uint32_t splits_reqd = (end_strip - start_strip + 1); 986 uint32_t strip; 987 uint64_t pd_strip; 988 uint8_t pd_idx; 989 uint32_t offset_in_strip; 990 uint64_t pd_lba; 991 uint64_t pd_blocks; 992 uint32_t index = 0; 993 struct io_output *output; 994 995 if (io_status == INVALID_IO_SUBMIT) { 996 CU_ASSERT(g_io_comp_status == false); 997 return; 998 } 999 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 1000 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 1001 1002 CU_ASSERT(splits_reqd == g_io_output_index); 1003 for (strip = start_strip; strip <= end_strip; strip++, index++) { 1004 pd_strip = strip / num_base_drives; 1005 pd_idx = strip % num_base_drives; 1006 if (strip == start_strip) { 1007 offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1); 1008 pd_lba = (pd_strip << strip_shift) + offset_in_strip; 1009 if (strip == end_strip) { 1010 pd_blocks = bdev_io->u.bdev.num_blocks; 1011 } else { 1012 pd_blocks = g_strip_size - offset_in_strip; 1013 } 1014 } else if (strip == end_strip) { 1015 pd_lba = pd_strip << strip_shift; 1016 pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) & 1017 
(g_strip_size - 1)) + 1; 1018 } else { 1019 pd_lba = pd_strip << raid_bdev->strip_size_shift; 1020 pd_blocks = raid_bdev->strip_size; 1021 } 1022 output = &g_io_output[index]; 1023 CU_ASSERT(pd_lba == output->offset_blocks); 1024 CU_ASSERT(pd_blocks == output->num_blocks); 1025 CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch); 1026 CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc); 1027 CU_ASSERT(bdev_io->type == output->iotype); 1028 if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) { 1029 verify_dif(output->iovs, output->iovcnt, output->md_buf, 1030 output->offset_blocks, output->num_blocks, 1031 spdk_bdev_desc_get_bdev(raid_bdev->base_bdev_info[pd_idx].desc)); 1032 } 1033 } 1034 CU_ASSERT(g_io_comp_status == io_status); 1035 } 1036 1037 static void 1038 verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 1039 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, 1040 uint32_t io_status) 1041 { 1042 uint32_t strip_shift = spdk_u32log2(g_strip_size); 1043 uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size; 1044 uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) % 1045 g_strip_size; 1046 uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift; 1047 uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >> 1048 strip_shift; 1049 uint8_t n_disks_involved; 1050 uint64_t start_strip_disk_idx; 1051 uint64_t end_strip_disk_idx; 1052 uint64_t nblocks_in_start_disk; 1053 uint64_t offset_in_start_disk; 1054 uint8_t disk_idx; 1055 uint64_t base_io_idx; 1056 uint64_t sum_nblocks = 0; 1057 struct io_output *output; 1058 1059 if (io_status == INVALID_IO_SUBMIT) { 1060 CU_ASSERT(g_io_comp_status == false); 1061 return; 1062 } 1063 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 1064 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 1065 SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ); 1066 SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE); 1067 1068 n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives); 1069 CU_ASSERT(n_disks_involved == g_io_output_index); 1070 1071 start_strip_disk_idx = start_strip % num_base_drives; 1072 end_strip_disk_idx = end_strip % num_base_drives; 1073 1074 offset_in_start_disk = g_io_output[0].offset_blocks; 1075 nblocks_in_start_disk = g_io_output[0].num_blocks; 1076 1077 for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved; 1078 base_io_idx++, disk_idx++) { 1079 uint64_t start_offset_in_disk; 1080 uint64_t end_offset_in_disk; 1081 1082 output = &g_io_output[base_io_idx]; 1083 1084 /* round disk_idx */ 1085 if (disk_idx >= num_base_drives) { 1086 disk_idx %= num_base_drives; 1087 } 1088 1089 /* start_offset_in_disk aligned in strip check: 1090 * The first base io has a same start_offset_in_strip with the whole raid io. 1091 * Other base io should have aligned start_offset_in_strip which is 0. 1092 */ 1093 start_offset_in_disk = output->offset_blocks; 1094 if (base_io_idx == 0) { 1095 CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip); 1096 } else { 1097 CU_ASSERT(start_offset_in_disk % g_strip_size == 0); 1098 } 1099 1100 /* end_offset_in_disk aligned in strip check: 1101 * Base io on disk at which end_strip is located, has a same end_offset_in_strip 1102 * with the whole raid io. 1103 * Other base io should have aligned end_offset_in_strip. 
1104 */ 1105 end_offset_in_disk = output->offset_blocks + output->num_blocks - 1; 1106 if (disk_idx == end_strip_disk_idx) { 1107 CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip); 1108 } else { 1109 CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1); 1110 } 1111 1112 /* start_offset_in_disk compared with start_disk. 1113 * 1. For disk_idx which is larger than start_strip_disk_idx: Its start_offset_in_disk 1114 * mustn't be larger than the start offset of start_offset_in_disk; And the gap 1115 * must be less than strip size. 1116 * 2. For disk_idx which is less than start_strip_disk_idx, Its start_offset_in_disk 1117 * must be larger than the start offset of start_offset_in_disk; And the gap mustn't 1118 * be less than strip size. 1119 */ 1120 if (disk_idx > start_strip_disk_idx) { 1121 CU_ASSERT(start_offset_in_disk <= offset_in_start_disk); 1122 CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size); 1123 } else if (disk_idx < start_strip_disk_idx) { 1124 CU_ASSERT(start_offset_in_disk > offset_in_start_disk); 1125 CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size); 1126 } 1127 1128 /* nblocks compared with start_disk: 1129 * The gap between them must be within a strip size. 1130 */ 1131 if (output->num_blocks <= nblocks_in_start_disk) { 1132 CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size); 1133 } else { 1134 CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size); 1135 } 1136 1137 sum_nblocks += output->num_blocks; 1138 1139 CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch); 1140 CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc); 1141 CU_ASSERT(bdev_io->type == output->iotype); 1142 } 1143 1144 /* Sum of each nblocks should be same with raid bdev_io */ 1145 CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks); 1146 1147 CU_ASSERT(g_io_comp_status == io_status); 1148 } 1149 1150 static void 1151 verify_raid_bdev_present(const char *name, bool presence) 1152 { 1153 struct raid_bdev *pbdev; 1154 bool pbdev_found; 1155 1156 pbdev_found = false; 1157 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1158 if (strcmp(pbdev->bdev.name, name) == 0) { 1159 pbdev_found = true; 1160 break; 1161 } 1162 } 1163 if (presence == true) { 1164 CU_ASSERT(pbdev_found == true); 1165 } else { 1166 CU_ASSERT(pbdev_found == false); 1167 } 1168 } 1169 1170 static void 1171 verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state) 1172 { 1173 struct raid_bdev *pbdev; 1174 struct raid_base_bdev_info *base_info; 1175 struct spdk_bdev *bdev = NULL; 1176 bool pbdev_found; 1177 uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF; 1178 1179 pbdev_found = false; 1180 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1181 if (strcmp(pbdev->bdev.name, r->name) == 0) { 1182 pbdev_found = true; 1183 if (presence == false) { 1184 break; 1185 } 1186 CU_ASSERT(pbdev->base_bdev_info != NULL); 1187 CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len)); 1188 CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) / 1189 g_block_len))); 1190 CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len)); 1191 CU_ASSERT((uint32_t)pbdev->state == raid_state); 1192 CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs); 1193 CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs); 1194 CU_ASSERT(pbdev->level == r->level); 1195 CU_ASSERT(pbdev->base_bdev_info != NULL); 1196 RAID_FOR_EACH_BASE_BDEV(pbdev, 
base_info) { 1197 CU_ASSERT(base_info->desc != NULL); 1198 bdev = spdk_bdev_desc_get_bdev(base_info->desc); 1199 CU_ASSERT(bdev != NULL); 1200 CU_ASSERT(base_info->remove_scheduled == false); 1201 CU_ASSERT((pbdev->superblock_enabled && base_info->data_offset != 0) || 1202 (!pbdev->superblock_enabled && base_info->data_offset == 0)); 1203 CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt); 1204 1205 if (bdev && base_info->data_size < min_blockcnt) { 1206 min_blockcnt = base_info->data_size; 1207 } 1208 } 1209 CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) * 1210 (r->strip_size_kb * 1024 / g_block_len)) * 1211 r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt); 1212 CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0); 1213 CU_ASSERT(pbdev->bdev.write_cache == 0); 1214 CU_ASSERT(pbdev->bdev.blocklen == g_block_len); 1215 if (pbdev->num_base_bdevs > 1) { 1216 CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size); 1217 CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true); 1218 } else { 1219 CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0); 1220 CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false); 1221 } 1222 CU_ASSERT(pbdev->bdev.ctxt == pbdev); 1223 CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table); 1224 CU_ASSERT(pbdev->bdev.module == &g_raid_if); 1225 break; 1226 } 1227 } 1228 if (presence == true) { 1229 CU_ASSERT(pbdev_found == true); 1230 } else { 1231 CU_ASSERT(pbdev_found == false); 1232 } 1233 } 1234 1235 static void 1236 verify_get_raids(struct rpc_bdev_raid_create *construct_req, 1237 uint8_t g_max_raids, 1238 char **g_get_raids_output, uint32_t g_get_raids_count) 1239 { 1240 uint8_t i, j; 1241 bool found; 1242 1243 CU_ASSERT(g_max_raids == g_get_raids_count); 1244 if (g_max_raids == g_get_raids_count) { 1245 for (i = 0; i < g_max_raids; i++) { 1246 found = false; 1247 for (j = 0; j < g_max_raids; j++) { 1248 if (construct_req[i].name && 1249 strcmp(construct_req[i].name, g_get_raids_output[i]) == 0) { 1250 found = true; 1251 break; 1252 } 1253 } 1254 CU_ASSERT(found == true); 1255 } 1256 } 1257 } 1258 1259 static void 1260 create_base_bdevs(uint32_t bbdev_start_idx) 1261 { 1262 uint8_t i; 1263 struct spdk_bdev *base_bdev; 1264 char name[16]; 1265 1266 for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) { 1267 snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1"); 1268 base_bdev = calloc(1, sizeof(struct spdk_bdev)); 1269 SPDK_CU_ASSERT_FATAL(base_bdev != NULL); 1270 base_bdev->name = strdup(name); 1271 spdk_uuid_generate(&base_bdev->uuid); 1272 SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL); 1273 base_bdev->blocklen = g_block_len; 1274 base_bdev->blockcnt = BLOCK_CNT; 1275 if (g_enable_dif) { 1276 base_bdev->md_interleave = false; 1277 base_bdev->md_len = MD_SIZE; 1278 base_bdev->dif_check_flags = 1279 SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK | 1280 SPDK_DIF_FLAGS_APPTAG_CHECK; 1281 base_bdev->dif_type = SPDK_DIF_TYPE1; 1282 } 1283 TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link); 1284 } 1285 } 1286 1287 static void 1288 create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name, 1289 uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled) 1290 { 1291 uint8_t i; 1292 char name[16]; 1293 uint8_t bbdev_idx = bbdev_start_idx; 1294 1295 r->name = strdup(raid_name); 1296 SPDK_CU_ASSERT_FATAL(r->name != NULL); 1297 r->strip_size_kb = (g_strip_size * g_block_len) / 1024; 1298 r->level = RAID0; 1299 r->superblock_enabled = 
superblock_enabled; 1300 r->base_bdevs.num_base_bdevs = g_max_base_drives; 1301 for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) { 1302 snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1"); 1303 r->base_bdevs.base_bdevs[i] = strdup(name); 1304 SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL); 1305 } 1306 if (create_base_bdev == true) { 1307 create_base_bdevs(bbdev_start_idx); 1308 } 1309 g_rpc_req = r; 1310 g_rpc_req_size = sizeof(*r); 1311 } 1312 1313 static void 1314 create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name, 1315 uint8_t bbdev_start_idx, bool create_base_bdev, 1316 uint8_t json_decode_obj_err, bool superblock_enabled) 1317 { 1318 create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock_enabled); 1319 1320 g_rpc_err = 0; 1321 g_json_decode_obj_create = 1; 1322 g_json_decode_obj_err = json_decode_obj_err; 1323 g_config_level_create = 0; 1324 g_test_multi_raids = 0; 1325 } 1326 1327 static void 1328 free_test_req(struct rpc_bdev_raid_create *r) 1329 { 1330 uint8_t i; 1331 1332 free(r->name); 1333 for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) { 1334 free(r->base_bdevs.base_bdevs[i]); 1335 } 1336 } 1337 1338 static void 1339 create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name, 1340 uint8_t json_decode_obj_err) 1341 { 1342 r->name = strdup(raid_name); 1343 SPDK_CU_ASSERT_FATAL(r->name != NULL); 1344 1345 g_rpc_req = r; 1346 g_rpc_req_size = sizeof(*r); 1347 g_rpc_err = 0; 1348 g_json_decode_obj_create = 0; 1349 g_json_decode_obj_err = json_decode_obj_err; 1350 g_config_level_create = 0; 1351 g_test_multi_raids = 0; 1352 } 1353 1354 static void 1355 create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category, 1356 uint8_t json_decode_obj_err) 1357 { 1358 r->category = strdup(category); 1359 SPDK_CU_ASSERT_FATAL(r->category != NULL); 1360 1361 g_rpc_req = r; 1362 g_rpc_req_size = sizeof(*r); 1363 g_rpc_err = 0; 1364 g_json_decode_obj_create = 0; 1365 g_json_decode_obj_err = json_decode_obj_err; 1366 g_config_level_create = 0; 1367 g_test_multi_raids = 1; 1368 g_get_raids_count = 0; 1369 } 1370 1371 static void 1372 test_create_raid(void) 1373 { 1374 struct rpc_bdev_raid_create req; 1375 struct rpc_bdev_raid_delete delete_req; 1376 1377 set_globals(); 1378 CU_ASSERT(raid_bdev_init() == 0); 1379 1380 verify_raid_bdev_present("raid1", false); 1381 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1382 rpc_bdev_raid_create(NULL, NULL); 1383 CU_ASSERT(g_rpc_err == 0); 1384 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1385 free_test_req(&req); 1386 1387 create_raid_bdev_delete_req(&delete_req, "raid1", 0); 1388 rpc_bdev_raid_delete(NULL, NULL); 1389 CU_ASSERT(g_rpc_err == 0); 1390 raid_bdev_exit(); 1391 base_bdevs_cleanup(); 1392 reset_globals(); 1393 } 1394 1395 static void 1396 test_delete_raid(void) 1397 { 1398 struct rpc_bdev_raid_create construct_req; 1399 struct rpc_bdev_raid_delete delete_req; 1400 1401 set_globals(); 1402 CU_ASSERT(raid_bdev_init() == 0); 1403 1404 verify_raid_bdev_present("raid1", false); 1405 create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false); 1406 rpc_bdev_raid_create(NULL, NULL); 1407 CU_ASSERT(g_rpc_err == 0); 1408 verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE); 1409 free_test_req(&construct_req); 1410 1411 create_raid_bdev_delete_req(&delete_req, "raid1", 0); 1412 rpc_bdev_raid_delete(NULL, NULL); 1413 CU_ASSERT(g_rpc_err == 0); 1414 verify_raid_bdev_present("raid1", false); 1415 1416 
raid_bdev_exit(); 1417 base_bdevs_cleanup(); 1418 reset_globals(); 1419 } 1420 1421 static void 1422 test_create_raid_invalid_args(void) 1423 { 1424 struct rpc_bdev_raid_create req; 1425 struct rpc_bdev_raid_delete destroy_req; 1426 struct raid_bdev *raid_bdev; 1427 1428 set_globals(); 1429 CU_ASSERT(raid_bdev_init() == 0); 1430 1431 verify_raid_bdev_present("raid1", false); 1432 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1433 req.level = INVALID_RAID_LEVEL; 1434 rpc_bdev_raid_create(NULL, NULL); 1435 CU_ASSERT(g_rpc_err == 1); 1436 free_test_req(&req); 1437 verify_raid_bdev_present("raid1", false); 1438 1439 create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false); 1440 rpc_bdev_raid_create(NULL, NULL); 1441 CU_ASSERT(g_rpc_err == 1); 1442 free_test_req(&req); 1443 verify_raid_bdev_present("raid1", false); 1444 1445 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1446 req.strip_size_kb = 1231; 1447 rpc_bdev_raid_create(NULL, NULL); 1448 CU_ASSERT(g_rpc_err == 1); 1449 free_test_req(&req); 1450 verify_raid_bdev_present("raid1", false); 1451 1452 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1453 rpc_bdev_raid_create(NULL, NULL); 1454 CU_ASSERT(g_rpc_err == 0); 1455 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1456 free_test_req(&req); 1457 1458 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1459 rpc_bdev_raid_create(NULL, NULL); 1460 CU_ASSERT(g_rpc_err == 1); 1461 free_test_req(&req); 1462 1463 create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false); 1464 rpc_bdev_raid_create(NULL, NULL); 1465 CU_ASSERT(g_rpc_err == 1); 1466 free_test_req(&req); 1467 verify_raid_bdev_present("raid2", false); 1468 1469 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false); 1470 free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]); 1471 req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1"); 1472 SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL); 1473 rpc_bdev_raid_create(NULL, NULL); 1474 CU_ASSERT(g_rpc_err == 1); 1475 free_test_req(&req); 1476 verify_raid_bdev_present("raid2", false); 1477 1478 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false); 1479 free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]); 1480 req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1"); 1481 SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL); 1482 rpc_bdev_raid_create(NULL, NULL); 1483 CU_ASSERT(g_rpc_err == 0); 1484 free_test_req(&req); 1485 verify_raid_bdev_present("raid2", true); 1486 raid_bdev = raid_bdev_find_by_name("raid2"); 1487 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 1488 check_and_remove_raid_bdev(raid_bdev); 1489 1490 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false); 1491 rpc_bdev_raid_create(NULL, NULL); 1492 CU_ASSERT(g_rpc_err == 0); 1493 free_test_req(&req); 1494 verify_raid_bdev_present("raid2", true); 1495 verify_raid_bdev_present("raid1", true); 1496 1497 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1498 rpc_bdev_raid_delete(NULL, NULL); 1499 create_raid_bdev_delete_req(&destroy_req, "raid2", 0); 1500 rpc_bdev_raid_delete(NULL, NULL); 1501 raid_bdev_exit(); 1502 base_bdevs_cleanup(); 1503 reset_globals(); 1504 } 1505 1506 static void 1507 test_delete_raid_invalid_args(void) 1508 { 1509 struct rpc_bdev_raid_create construct_req; 1510 struct rpc_bdev_raid_delete destroy_req; 1511 1512 set_globals(); 1513 CU_ASSERT(raid_bdev_init() == 0); 
1514 1515 verify_raid_bdev_present("raid1", false); 1516 create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false); 1517 rpc_bdev_raid_create(NULL, NULL); 1518 CU_ASSERT(g_rpc_err == 0); 1519 verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE); 1520 free_test_req(&construct_req); 1521 1522 create_raid_bdev_delete_req(&destroy_req, "raid2", 0); 1523 rpc_bdev_raid_delete(NULL, NULL); 1524 CU_ASSERT(g_rpc_err == 1); 1525 1526 create_raid_bdev_delete_req(&destroy_req, "raid1", 1); 1527 rpc_bdev_raid_delete(NULL, NULL); 1528 CU_ASSERT(g_rpc_err == 1); 1529 free(destroy_req.name); 1530 verify_raid_bdev_present("raid1", true); 1531 1532 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1533 rpc_bdev_raid_delete(NULL, NULL); 1534 CU_ASSERT(g_rpc_err == 0); 1535 verify_raid_bdev_present("raid1", false); 1536 1537 raid_bdev_exit(); 1538 base_bdevs_cleanup(); 1539 reset_globals(); 1540 } 1541 1542 static void 1543 test_io_channel(void) 1544 { 1545 struct rpc_bdev_raid_create req; 1546 struct rpc_bdev_raid_delete destroy_req; 1547 struct raid_bdev *pbdev; 1548 struct spdk_io_channel *ch; 1549 struct raid_bdev_io_channel *ch_ctx; 1550 1551 set_globals(); 1552 CU_ASSERT(raid_bdev_init() == 0); 1553 1554 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1555 verify_raid_bdev_present("raid1", false); 1556 rpc_bdev_raid_create(NULL, NULL); 1557 CU_ASSERT(g_rpc_err == 0); 1558 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1559 1560 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1561 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1562 break; 1563 } 1564 } 1565 CU_ASSERT(pbdev != NULL); 1566 1567 ch = spdk_get_io_channel(pbdev); 1568 SPDK_CU_ASSERT_FATAL(ch != NULL); 1569 1570 ch_ctx = spdk_io_channel_get_ctx(ch); 1571 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1572 1573 free_test_req(&req); 1574 1575 spdk_put_io_channel(ch); 1576 1577 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1578 rpc_bdev_raid_delete(NULL, NULL); 1579 CU_ASSERT(g_rpc_err == 0); 1580 verify_raid_bdev_present("raid1", false); 1581 1582 raid_bdev_exit(); 1583 base_bdevs_cleanup(); 1584 reset_globals(); 1585 } 1586 1587 static void 1588 test_write_io(void) 1589 { 1590 struct rpc_bdev_raid_create req; 1591 struct rpc_bdev_raid_delete destroy_req; 1592 struct raid_bdev *pbdev; 1593 struct spdk_io_channel *ch; 1594 struct raid_bdev_io_channel *ch_ctx; 1595 uint8_t i; 1596 struct spdk_bdev_io *bdev_io; 1597 uint64_t io_len; 1598 uint64_t lba = 0; 1599 1600 set_globals(); 1601 CU_ASSERT(raid_bdev_init() == 0); 1602 1603 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1604 verify_raid_bdev_present("raid1", false); 1605 rpc_bdev_raid_create(NULL, NULL); 1606 CU_ASSERT(g_rpc_err == 0); 1607 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1608 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1609 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1610 break; 1611 } 1612 } 1613 CU_ASSERT(pbdev != NULL); 1614 1615 ch = spdk_get_io_channel(pbdev); 1616 SPDK_CU_ASSERT_FATAL(ch != NULL); 1617 1618 ch_ctx = spdk_io_channel_get_ctx(ch); 1619 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1620 1621 /* test 2 IO sizes based on global strip size set earlier */ 1622 for (i = 0; i < 2; i++) { 1623 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1624 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1625 io_len = (g_strip_size / 2) << i; 1626 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE); 1627 lba += g_strip_size; 1628 
memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1629 g_io_output_index = 0; 1630 generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf, 1631 bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev); 1632 raid_bdev_submit_request(ch, bdev_io); 1633 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1634 g_child_io_status_flag); 1635 bdev_io_cleanup(bdev_io); 1636 } 1637 1638 free_test_req(&req); 1639 spdk_put_io_channel(ch); 1640 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1641 rpc_bdev_raid_delete(NULL, NULL); 1642 CU_ASSERT(g_rpc_err == 0); 1643 verify_raid_bdev_present("raid1", false); 1644 1645 raid_bdev_exit(); 1646 base_bdevs_cleanup(); 1647 reset_globals(); 1648 } 1649 1650 static void 1651 test_read_io(void) 1652 { 1653 struct rpc_bdev_raid_create req; 1654 struct rpc_bdev_raid_delete destroy_req; 1655 struct raid_bdev *pbdev; 1656 struct spdk_io_channel *ch; 1657 struct raid_bdev_io_channel *ch_ctx; 1658 uint8_t i; 1659 struct spdk_bdev_io *bdev_io; 1660 uint64_t io_len; 1661 uint64_t lba; 1662 1663 set_globals(); 1664 CU_ASSERT(raid_bdev_init() == 0); 1665 1666 verify_raid_bdev_present("raid1", false); 1667 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1668 rpc_bdev_raid_create(NULL, NULL); 1669 CU_ASSERT(g_rpc_err == 0); 1670 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1671 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1672 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1673 break; 1674 } 1675 } 1676 CU_ASSERT(pbdev != NULL); 1677 1678 ch = spdk_get_io_channel(pbdev); 1679 SPDK_CU_ASSERT_FATAL(ch != NULL); 1680 1681 ch_ctx = spdk_io_channel_get_ctx(ch); 1682 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1683 1684 /* test 2 IO sizes based on global strip size set earlier */ 1685 lba = 0; 1686 for (i = 0; i < 2; i++) { 1687 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1688 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1689 io_len = (g_strip_size / 2) << i; 1690 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ); 1691 lba += g_strip_size; 1692 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1693 g_io_output_index = 0; 1694 raid_bdev_submit_request(ch, bdev_io); 1695 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1696 g_child_io_status_flag); 1697 bdev_io_cleanup(bdev_io); 1698 } 1699 1700 free_test_req(&req); 1701 spdk_put_io_channel(ch); 1702 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1703 rpc_bdev_raid_delete(NULL, NULL); 1704 CU_ASSERT(g_rpc_err == 0); 1705 verify_raid_bdev_present("raid1", false); 1706 1707 raid_bdev_exit(); 1708 base_bdevs_cleanup(); 1709 reset_globals(); 1710 } 1711 1712 static void 1713 raid_bdev_io_generate_by_strips(uint64_t n_strips) 1714 { 1715 uint64_t lba; 1716 uint64_t nblocks; 1717 uint64_t start_offset; 1718 uint64_t end_offset; 1719 uint64_t offsets_in_strip[3]; 1720 uint64_t start_bdev_idx; 1721 uint64_t start_bdev_offset; 1722 uint64_t start_bdev_idxs[3]; 1723 int i, j, l; 1724 1725 /* 3 different situations of offset in strip */ 1726 offsets_in_strip[0] = 0; 1727 offsets_in_strip[1] = g_strip_size >> 1; 1728 offsets_in_strip[2] = g_strip_size - 1; 1729 1730 /* 3 different situations of start_bdev_idx */ 1731 start_bdev_idxs[0] = 0; 1732 start_bdev_idxs[1] = g_max_base_drives >> 1; 1733 start_bdev_idxs[2] = g_max_base_drives - 1; 1734 1735 /* consider different offset in strip */ 
1736 for (i = 0; i < 3; i++) { 1737 start_offset = offsets_in_strip[i]; 1738 for (j = 0; j < 3; j++) { 1739 end_offset = offsets_in_strip[j]; 1740 if (n_strips == 1 && start_offset > end_offset) { 1741 continue; 1742 } 1743 1744 /* consider at which base_bdev lba is started. */ 1745 for (l = 0; l < 3; l++) { 1746 start_bdev_idx = start_bdev_idxs[l]; 1747 start_bdev_offset = start_bdev_idx * g_strip_size; 1748 lba = g_lba_offset + start_bdev_offset + start_offset; 1749 nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1; 1750 1751 g_io_ranges[g_io_range_idx].lba = lba; 1752 g_io_ranges[g_io_range_idx].nblocks = nblocks; 1753 1754 SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE); 1755 g_io_range_idx++; 1756 } 1757 } 1758 } 1759 } 1760 1761 static void 1762 raid_bdev_io_generate(void) 1763 { 1764 uint64_t n_strips; 1765 uint64_t n_strips_span = g_max_base_drives; 1766 uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1, 1767 g_max_base_drives * 2, g_max_base_drives * 3, 1768 g_max_base_drives * 4 1769 }; 1770 uint32_t i; 1771 1772 g_io_range_idx = 0; 1773 1774 /* consider different number of strips from 1 to strips spanned base bdevs, 1775 * and even to times of strips spanned base bdevs 1776 */ 1777 for (n_strips = 1; n_strips < n_strips_span; n_strips++) { 1778 raid_bdev_io_generate_by_strips(n_strips); 1779 } 1780 1781 for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) { 1782 n_strips = n_strips_times[i]; 1783 raid_bdev_io_generate_by_strips(n_strips); 1784 } 1785 } 1786 1787 static void 1788 test_unmap_io(void) 1789 { 1790 struct rpc_bdev_raid_create req; 1791 struct rpc_bdev_raid_delete destroy_req; 1792 struct raid_bdev *pbdev; 1793 struct spdk_io_channel *ch; 1794 struct raid_bdev_io_channel *ch_ctx; 1795 struct spdk_bdev_io *bdev_io; 1796 uint32_t count; 1797 uint64_t io_len; 1798 uint64_t lba; 1799 1800 set_globals(); 1801 CU_ASSERT(raid_bdev_init() == 0); 1802 1803 verify_raid_bdev_present("raid1", false); 1804 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1805 rpc_bdev_raid_create(NULL, NULL); 1806 CU_ASSERT(g_rpc_err == 0); 1807 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1808 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1809 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1810 break; 1811 } 1812 } 1813 CU_ASSERT(pbdev != NULL); 1814 1815 ch = spdk_get_io_channel(pbdev); 1816 SPDK_CU_ASSERT_FATAL(ch != NULL); 1817 1818 ch_ctx = spdk_io_channel_get_ctx(ch); 1819 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1820 1821 CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true); 1822 CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true); 1823 1824 raid_bdev_io_generate(); 1825 for (count = 0; count < g_io_range_idx; count++) { 1826 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1827 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1828 io_len = g_io_ranges[count].nblocks; 1829 lba = g_io_ranges[count].lba; 1830 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP); 1831 memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output)); 1832 g_io_output_index = 0; 1833 raid_bdev_submit_request(ch, bdev_io); 1834 verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1835 g_child_io_status_flag); 1836 bdev_io_cleanup(bdev_io); 1837 } 1838 1839 free_test_req(&req); 1840 spdk_put_io_channel(ch); 1841 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1842 
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

/* Test IO failures */
static void
test_io_failure(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;
	uint32_t count;
	uint64_t io_len;
	uint64_t lba;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, req.name) == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	lba = 0;
	for (count = 0; count < 1; count++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << count;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  INVALID_IO_SUBMIT);
		bdev_io_cleanup(bdev_io);
	}

	lba = 0;
	g_child_io_status_flag = false;
	for (count = 0; count < 1; count++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << count;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
			     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

/* Test reset IO */
static void
test_reset_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	g_bdev_io_submit_status = 0;
	g_child_io_status_flag = true;

	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
	memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
	g_io_output_index = 0;
	raid_bdev_submit_request(ch, bdev_io);
	verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			true);
	bdev_io_cleanup(bdev_io);

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

/* Create multiple raids, destroy them without IO, and exercise the get_raids RPC */
static void
test_multi_raid_no_io(void)
{
	struct rpc_bdev_raid_create *construct_req;
	struct rpc_bdev_raid_delete destroy_req;
	struct rpc_bdev_raid_get_bdevs get_raids_req;
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = 0;

	set_globals();
	construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
	CU_ASSERT(raid_bdev_init() == 0);
	for (i = 0; i < g_max_raids; i++) {
		snprintf(name, 16, "%s%u", "raid", i);
		verify_raid_bdev_present(name, false);
		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
		bbdev_idx += g_max_base_drives;
		rpc_bdev_raid_create(NULL, NULL);
		CU_ASSERT(g_rpc_err == 0);
		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
	}

	create_get_raids_req(&get_raids_req, "all", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	create_get_raids_req(&get_raids_req, "online", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	create_get_raids_req(&get_raids_req, "configuring", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "offline", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "invalid_category", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	CU_ASSERT(g_get_raids_count == 0);

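	/* Request with a simulated failure while decoding the request object
	 * (non-zero last argument, presumably via g_json_decode_obj_err):
	 * the RPC is expected to fail and return no raid bdevs. */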
	create_get_raids_req(&get_raids_req, "all", 1);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(get_raids_req.category);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "all", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == g_max_raids);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	for (i = 0; i < g_max_raids; i++) {
		SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
		snprintf(name, 16, "%s", construct_req[i].name);
		create_raid_bdev_delete_req(&destroy_req, name, 0);
		rpc_bdev_raid_delete(NULL, NULL);
		CU_ASSERT(g_rpc_err == 0);
		verify_raid_bdev_present(name, false);
	}
	raid_bdev_exit();
	for (i = 0; i < g_max_raids; i++) {
		free_test_req(&construct_req[i]);
	}
	free(construct_req);
	base_bdevs_cleanup();
	reset_globals();
}

/* Create multiple raids, fire IOs on raids */
static void
test_multi_raid_with_io(void)
{
	struct rpc_bdev_raid_create *construct_req;
	struct rpc_bdev_raid_delete destroy_req;
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = 0;
	struct raid_bdev *pbdev;
	struct spdk_io_channel **channels;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba = 0;
	int16_t iotype;

	set_globals();
	construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create));
	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
	CU_ASSERT(raid_bdev_init() == 0);
	channels = calloc(g_max_raids, sizeof(*channels));
	SPDK_CU_ASSERT_FATAL(channels != NULL);

	for (i = 0; i < g_max_raids; i++) {
		snprintf(name, 16, "%s%u", "raid", i);
		verify_raid_bdev_present(name, false);
		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
		bbdev_idx += g_max_base_drives;
		rpc_bdev_raid_create(NULL, NULL);
		CU_ASSERT(g_rpc_err == 0);
		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
				break;
			}
		}
		CU_ASSERT(pbdev != NULL);

		channels[i] = spdk_get_io_channel(pbdev);
		SPDK_CU_ASSERT_FATAL(channels[i] != NULL);
	}

	/* This performs a read on the first raid and a write on the second
	 * (the IO type is derived from the loop index). It can be expanded in
	 * the future to perform r/w on each raid device in the event that
	 * multiple raid levels are supported.
	 */
	for (i = 0; i < g_max_raids; i++) {
		struct spdk_io_channel *ch = channels[i];
		struct raid_bdev_io_channel *ch_ctx = spdk_io_channel_get_ctx(ch);

		SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = g_strip_size;
		iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
				break;
			}
		}
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, iotype);
		CU_ASSERT(pbdev != NULL);
		if (iotype == SPDK_BDEV_IO_TYPE_WRITE) {
			generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
				     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
		}
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	for (i = 0; i < g_max_raids; i++) {
		spdk_put_io_channel(channels[i]);
		snprintf(name, 16, "%s", construct_req[i].name);
		create_raid_bdev_delete_req(&destroy_req, name, 0);
		rpc_bdev_raid_delete(NULL, NULL);
		CU_ASSERT(g_rpc_err == 0);
		verify_raid_bdev_present(name, false);
	}
	raid_bdev_exit();
	for (i = 0; i < g_max_raids; i++) {
		free_test_req(&construct_req[i]);
	}
	free(construct_req);
	free(channels);
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_io_type_supported(void)
{
	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
}

static void
test_raid_json_dump_info(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);

	free_test_req(&req);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_context_size(void)
{
	CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
}

static void
test_raid_level_conversions(void)
{
	const char *raid_str;

	CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL);
	CU_ASSERT(raid_bdev_str_to_level("0") == RAID0);
	CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0);
	CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0);

	raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
	raid_str = raid_bdev_level_to_str(1234);
	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
	raid_str = raid_bdev_level_to_str(RAID0);
	CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
}

static void
test_create_raid_superblock(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
complete_process_request(void *ctx)
{
	struct raid_bdev_process_request *process_req = ctx;

	raid_bdev_process_request_complete(process_req, 0);
}

static int
submit_process_request(struct raid_bdev_process_request *process_req,
		       struct raid_bdev_io_channel *raid_ch)
{
	struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(raid_ch));

	*(uint64_t *)raid_bdev->module_private += process_req->num_blocks;

	spdk_thread_send_msg(spdk_get_thread(), complete_process_request, process_req);

	return process_req->num_blocks;
}

static void
test_raid_process(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_bdev *base_bdev;
	struct spdk_thread *process_thread;
	uint64_t num_blocks_processed = 0;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	TAILQ_FOREACH(base_bdev, &g_bdev_list, internal.link) {
		base_bdev->blockcnt = 128;
	}
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	pbdev->module->submit_process_request = submit_process_request;
	pbdev->module_private = &num_blocks_processed;

	CU_ASSERT(raid_bdev_start_rebuild(&pbdev->base_bdev_info[0]) == 0);
	poll_threads();

	SPDK_CU_ASSERT_FATAL(pbdev->process != NULL);

	process_thread = spdk_thread_get_by_id(spdk_thread_get_id(spdk_get_thread()) + 1);

	while (spdk_thread_poll(process_thread, 0, 0) > 0) {
		poll_threads();
	}

	CU_ASSERT(pbdev->process == NULL);
	CU_ASSERT(num_blocks_processed == pbdev->bdev.blockcnt);

	poll_threads();

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_raid_io_split(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *raid_ch;
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev_io *raid_io;
	uint64_t split_offset;
	struct iovec iovs_orig[4];
	struct raid_bdev_process process = { };

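	/* Writes are split at raid_ch->process.offset: the part at and above the
	 * offset is submitted first, then (after its deferred completion) the
	 * remainder below the offset, and finally the original iovecs and md_buf
	 * are expected to be restored. */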
	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);
	pbdev->bdev.md_len = 8;

	process.raid_bdev = pbdev;
	process.target = &pbdev->base_bdev_info[0];
	pbdev->process = &process;
	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	raid_ch = spdk_io_channel_get_ctx(ch);
	g_bdev_io_defer_completion = true;

	/* test split of bdev_io with 1 iovec */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE);
	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
	g_io_output_index = 0;

	split_offset = 1;
	raid_ch->process.offset = split_offset;
	generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
		     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(raid_io->iovs == raid_io->split.iov);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base + split_offset * g_block_len);
	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len - split_offset * g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
	CU_ASSERT(raid_io->iovs[0].iov_len == split_offset * g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
	CU_ASSERT(g_io_output_index == 2);
	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
	CU_ASSERT(g_io_output[1].offset_blocks == 0);
	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
	bdev_io_cleanup(bdev_io);

	/* test split of bdev_io with 4 iovecs */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	_bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE,
			    4, g_strip_size / 4 * g_block_len);
	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
	g_io_output_index = 0;

	split_offset = 1; /* split at the first iovec */
	raid_ch->process.offset = split_offset;
	generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
		     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[0]);
	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[0]);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base + g_block_len);
	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[0].iov_len - g_block_len);
	CU_ASSERT(memcmp(raid_io->iovs + 1, iovs_orig + 1, sizeof(*iovs_orig) * 3) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base);
	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
	CU_ASSERT(g_io_output_index == 2);
	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
	CU_ASSERT(g_io_output[1].offset_blocks == 0);
	CU_ASSERT(g_io_output[1].num_blocks == split_offset);

	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
	g_io_output_index = 0;

	split_offset = g_strip_size / 2; /* split exactly between second and third iovec */
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 2);
	CU_ASSERT(raid_io->split.iov == NULL);
	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig + 2, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 2);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
	CU_ASSERT(g_io_output_index == 2);
	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
	CU_ASSERT(g_io_output[1].offset_blocks == 0);
	CU_ASSERT(g_io_output[1].num_blocks == split_offset);

	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
	g_io_output_index = 0;

	split_offset = g_strip_size / 2 + 1; /* split at the third iovec */
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 2);
	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[2]);
	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[2].iov_base + g_block_len);
	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[2].iov_len - g_block_len);
	CU_ASSERT(raid_io->iovs[1].iov_base == iovs_orig[3].iov_base);
	CU_ASSERT(raid_io->iovs[1].iov_len == iovs_orig[3].iov_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 3);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 2) == 0);
	CU_ASSERT(raid_io->iovs[2].iov_base == iovs_orig[2].iov_base);
	CU_ASSERT(raid_io->iovs[2].iov_len == g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
	CU_ASSERT(g_io_output_index == 2);
	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
	CU_ASSERT(g_io_output[1].offset_blocks == 0);
	CU_ASSERT(g_io_output[1].num_blocks == split_offset);

	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
	g_io_output_index = 0;

	split_offset = g_strip_size - 1; /* split at the last iovec */
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[3]);
	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[3]);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[3].iov_base + iovs_orig[3].iov_len - g_block_len);
	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 3) == 0);
	CU_ASSERT(raid_io->iovs[3].iov_base == iovs_orig[3].iov_base);
	CU_ASSERT(raid_io->iovs[3].iov_len == iovs_orig[3].iov_len - g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
	CU_ASSERT(g_io_output_index == 2);
	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
	CU_ASSERT(g_io_output[1].offset_blocks == 0);
	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
	bdev_io_cleanup(bdev_io);

	spdk_put_io_channel(ch);
	free_test_req(&req);
	pbdev->process = NULL;

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static int
test_bdev_ioch_create(void *io_device, void *ctx_buf)
{
	return 0;
}

static void
test_bdev_ioch_destroy(void *io_device, void *ctx_buf)
{
}

int
main(int argc, char **argv)
{
	unsigned int num_failures;

	CU_TestInfo tests[] = {
		{ "test_create_raid", test_create_raid },
		{ "test_create_raid_superblock", test_create_raid_superblock },
		{ "test_delete_raid", test_delete_raid },
		{ "test_create_raid_invalid_args", test_create_raid_invalid_args },
		{ "test_delete_raid_invalid_args", test_delete_raid_invalid_args },
		{ "test_io_channel", test_io_channel },
		{ "test_reset_io", test_reset_io },
		{ "test_write_io", test_write_io },
		{ "test_read_io", test_read_io },
		{ "test_unmap_io", test_unmap_io },
		{ "test_io_failure", test_io_failure },
		{ "test_multi_raid_no_io", test_multi_raid_no_io },
		{ "test_multi_raid_with_io", test_multi_raid_with_io },
		{ "test_io_type_supported", test_io_type_supported },
		{ "test_raid_json_dump_info", test_raid_json_dump_info },
		{ "test_context_size", test_context_size },
		{ "test_raid_level_conversions", test_raid_level_conversions },
		{ "test_raid_io_split", test_raid_io_split },
		CU_TEST_INFO_NULL,
	};
	/* TODO The RAID process test can only be run once for now, until the fix for getting the
	 * process thread is merged */
	CU_TestInfo tests_single_run[] = {
		{ "test_raid_process", test_raid_process },
		CU_TEST_INFO_NULL,
	};
	CU_SuiteInfo suites[] = {
		{ "raid", set_test_opts, NULL, NULL, NULL, tests },
		{ "raid_dif", set_test_opts_dif, NULL, NULL, NULL, tests },
		{ "raid_single_run", set_test_opts, NULL, NULL, NULL, tests_single_run },
		CU_SUITE_INFO_NULL,
	};

	CU_initialize_registry();
	CU_register_suites(suites);

	allocate_threads(1);
	set_thread(0);
	spdk_io_device_register(&g_bdev_ch_io_device, test_bdev_ioch_create, test_bdev_ioch_destroy, 0,
				NULL);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	spdk_io_device_unregister(&g_bdev_ch_io_device, NULL);
	free_threads();

	return num_failures;
}