1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2018 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/env.h" 10 #include "spdk_internal/mock.h" 11 #include "thread/thread_internal.h" 12 #include "bdev/raid/bdev_raid.c" 13 #include "bdev/raid/bdev_raid_rpc.c" 14 #include "bdev/raid/raid0.c" 15 #include "common/lib/ut_multithread.c" 16 17 #define MAX_BASE_DRIVES 32 18 #define MAX_RAIDS 2 19 #define INVALID_IO_SUBMIT 0xFFFF 20 #define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5)) 21 #define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul) 22 23 struct spdk_bdev_channel { 24 struct spdk_io_channel *channel; 25 }; 26 27 struct spdk_bdev_desc { 28 struct spdk_bdev *bdev; 29 }; 30 31 /* Data structure to capture the output of IO for verification */ 32 struct io_output { 33 struct spdk_bdev_desc *desc; 34 struct spdk_io_channel *ch; 35 uint64_t offset_blocks; 36 uint64_t num_blocks; 37 spdk_bdev_io_completion_cb cb; 38 void *cb_arg; 39 enum spdk_bdev_io_type iotype; 40 }; 41 42 struct raid_io_ranges { 43 uint64_t lba; 44 uint64_t nblocks; 45 }; 46 47 /* Globals */ 48 int g_bdev_io_submit_status; 49 struct io_output *g_io_output = NULL; 50 uint32_t g_io_output_index; 51 uint32_t g_io_comp_status; 52 bool g_child_io_status_flag; 53 void *g_rpc_req; 54 uint32_t g_rpc_req_size; 55 TAILQ_HEAD(bdev, spdk_bdev); 56 struct bdev g_bdev_list; 57 TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry); 58 struct waitq g_io_waitq; 59 uint32_t g_block_len; 60 uint32_t g_strip_size; 61 uint32_t g_max_io_size; 62 uint8_t g_max_base_drives; 63 uint8_t g_max_raids; 64 uint8_t g_ignore_io_output; 65 uint8_t g_rpc_err; 66 char *g_get_raids_output[MAX_RAIDS]; 67 uint32_t g_get_raids_count; 68 uint8_t g_json_decode_obj_err; 69 uint8_t g_json_decode_obj_create; 70 uint8_t g_config_level_create = 0; 71 uint8_t g_test_multi_raids; 72 struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE]; 73 uint32_t g_io_range_idx; 74 uint64_t g_lba_offset; 75 uint64_t g_bdev_ch_io_device; 76 bool g_bdev_io_defer_completion; 77 TAILQ_HEAD(, spdk_bdev_io) g_deferred_ios = TAILQ_HEAD_INITIALIZER(g_deferred_ios); 78 79 DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module)); 80 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 81 DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev, 82 enum spdk_bdev_io_type io_type), true); 83 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 84 DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 85 uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb, 86 void *cb_arg), 0); 87 DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp), 88 NULL); 89 DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func, 90 uint32_t state_mask)); 91 DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias)); 92 DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request, 93 struct spdk_json_write_ctx *w)); 94 DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request, 95 bool value)); 96 DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0); 97 DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0); 98 DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0); 99 DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values, 100 spdk_json_decode_fn decode_func, 101 void *out, size_t max_size, size_t *out_size, size_t stride), 0); 102 DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0); 103 DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0); 104 DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0); 105 DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w, 106 const char *name), 0); 107 DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0); 108 DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0); 109 DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0); 110 DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0); 111 DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w, 112 const char *name), 0); 113 DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0); 114 DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0); 115 DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name, 116 uint64_t val), 0); 117 DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL); 118 DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch, 119 struct spdk_bdev_io_wait_entry *entry), 0); 120 DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev, 121 struct spdk_memory_domain **domains, int array_size), 0); 122 DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev"); 123 DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0); 124 DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false); 125 DEFINE_STUB(spdk_bdev_is_md_separate, bool, (const struct spdk_bdev *bdev), false); 126 DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type, (const struct spdk_bdev *bdev), 127 SPDK_DIF_DISABLE); 128 DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false); 129 DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0); 130 DEFINE_STUB_V(raid_bdev_init_superblock, (struct raid_bdev *raid_bdev)); 131 132 int 133 raid_bdev_load_base_bdev_superblock(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 134 raid_bdev_load_sb_cb cb, void *cb_ctx) 135 { 136 cb(NULL, -EINVAL, cb_ctx); 137 138 return 0; 139 } 140 141 void 142 raid_bdev_write_superblock(struct raid_bdev *raid_bdev, raid_bdev_write_sb_cb cb, void *cb_ctx) 143 { 144 cb(0, raid_bdev, cb_ctx); 145 } 146 147 const struct spdk_uuid * 148 spdk_bdev_get_uuid(const struct spdk_bdev *bdev) 149 { 150 return &bdev->uuid; 151 } 152 153 struct spdk_io_channel * 154 spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc) 155 { 156 return spdk_get_io_channel(&g_bdev_ch_io_device); 157 } 158 159 static void 160 set_test_opts(void) 161 { 162 163 g_max_base_drives = MAX_BASE_DRIVES; 164 g_max_raids = MAX_RAIDS; 165 g_block_len = 4096; 166 g_strip_size = 64; 167 g_max_io_size = 1024; 168 169 printf("Test Options\n"); 170 printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, " 171 "g_max_raids = %u\n", 172 g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids); 173 } 174 175 /* Set globals before every test run */ 176 static void 177 set_globals(void) 178 { 179 uint32_t max_splits; 180 181 g_bdev_io_submit_status = 0; 182 if (g_max_io_size < g_strip_size) { 183 max_splits = 2; 184 } else { 185 max_splits = (g_max_io_size / g_strip_size) + 1; 186 } 187 if (max_splits < g_max_base_drives) { 188 max_splits = g_max_base_drives; 189 } 190 191 g_io_output = calloc(max_splits, sizeof(struct io_output)); 192 SPDK_CU_ASSERT_FATAL(g_io_output != NULL); 193 g_io_output_index = 0; 194 memset(g_get_raids_output, 0, sizeof(g_get_raids_output)); 195 g_get_raids_count = 0; 196 g_io_comp_status = 0; 197 g_ignore_io_output = 0; 198 g_config_level_create = 0; 199 g_rpc_err = 0; 200 g_test_multi_raids = 0; 201 g_child_io_status_flag = true; 202 TAILQ_INIT(&g_bdev_list); 203 TAILQ_INIT(&g_io_waitq); 204 g_rpc_req = NULL; 205 g_rpc_req_size = 0; 206 g_json_decode_obj_err = 0; 207 g_json_decode_obj_create = 0; 208 g_lba_offset = 0; 209 g_bdev_io_defer_completion = false; 210 } 211 212 static void 213 base_bdevs_cleanup(void) 214 { 215 struct spdk_bdev *bdev; 216 struct spdk_bdev *bdev_next; 217 218 if (!TAILQ_EMPTY(&g_bdev_list)) { 219 TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) { 220 free(bdev->name); 221 TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); 222 free(bdev); 223 } 224 } 225 } 226 227 static void 228 check_and_remove_raid_bdev(struct raid_bdev *raid_bdev) 229 { 230 struct raid_base_bdev_info *base_info; 231 232 assert(raid_bdev != NULL); 233 assert(raid_bdev->base_bdev_info != NULL); 234 235 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) { 236 if (base_info->desc) { 237 raid_bdev_free_base_bdev_resource(base_info); 238 } 239 } 240 assert(raid_bdev->num_base_bdevs_discovered == 0); 241 raid_bdev_cleanup_and_free(raid_bdev); 242 } 243 244 /* Reset globals */ 245 static void 246 reset_globals(void) 247 { 248 if (g_io_output) { 249 free(g_io_output); 250 g_io_output = NULL; 251 } 252 g_rpc_req = NULL; 253 g_rpc_req_size = 0; 254 } 255 256 void 257 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, 258 uint64_t len) 259 { 260 cb(bdev_io->internal.ch->channel, bdev_io, true); 261 } 262 263 /* Store the IO completion status in global variable to verify by various tests */ 264 void 265 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 266 { 267 g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false); 268 } 269 270 static void 271 set_io_output(struct io_output *output, 272 struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 273 uint64_t offset_blocks, uint64_t num_blocks, 274 spdk_bdev_io_completion_cb cb, void *cb_arg, 275 enum spdk_bdev_io_type iotype) 276 { 277 output->desc = desc; 278 output->ch = ch; 279 output->offset_blocks = offset_blocks; 280 output->num_blocks = num_blocks; 281 output->cb = cb; 282 output->cb_arg = cb_arg; 283 output->iotype = iotype; 284 } 285 286 static void 287 child_io_complete(struct spdk_bdev_io *child_io, spdk_bdev_io_completion_cb cb, void *cb_arg) 288 { 289 if (g_bdev_io_defer_completion) { 290 child_io->internal.cb = cb; 291 child_io->internal.caller_ctx = cb_arg; 292 TAILQ_INSERT_TAIL(&g_deferred_ios, child_io, internal.link); 293 } else { 294 cb(child_io, g_child_io_status_flag, cb_arg); 295 } 296 } 297 298 static void 299 complete_deferred_ios(void) 300 { 301 struct spdk_bdev_io *child_io, *tmp; 302 303 TAILQ_FOREACH_SAFE(child_io, &g_deferred_ios, internal.link, tmp) { 304 TAILQ_REMOVE(&g_deferred_ios, child_io, internal.link); 305 child_io->internal.cb(child_io, g_child_io_status_flag, child_io->internal.caller_ctx); 306 } 307 } 308 309 /* It will cache the split IOs for verification */ 310 int 311 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 312 struct iovec *iov, int iovcnt, 313 uint64_t offset_blocks, uint64_t num_blocks, 314 spdk_bdev_io_completion_cb cb, void *cb_arg) 315 { 316 struct io_output *output = &g_io_output[g_io_output_index]; 317 struct spdk_bdev_io *child_io; 318 319 if (g_ignore_io_output) { 320 return 0; 321 } 322 323 if (g_max_io_size < g_strip_size) { 324 SPDK_CU_ASSERT_FATAL(g_io_output_index < 2); 325 } else { 326 SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1); 327 } 328 if (g_bdev_io_submit_status == 0) { 329 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 330 SPDK_BDEV_IO_TYPE_WRITE); 331 g_io_output_index++; 332 333 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 334 SPDK_CU_ASSERT_FATAL(child_io != NULL); 335 child_io_complete(child_io, cb, cb_arg); 336 } 337 338 return g_bdev_io_submit_status; 339 } 340 341 int 342 spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 343 struct iovec *iov, int iovcnt, 344 uint64_t offset_blocks, uint64_t num_blocks, 345 spdk_bdev_io_completion_cb cb, void *cb_arg, 346 struct spdk_bdev_ext_io_opts *opts) 347 { 348 return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg); 349 } 350 351 int 352 spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 353 struct iovec *iov, int iovcnt, void *md, 354 uint64_t offset_blocks, uint64_t num_blocks, 355 spdk_bdev_io_completion_cb cb, void *cb_arg) 356 { 357 return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg); 358 } 359 360 int 361 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 362 spdk_bdev_io_completion_cb cb, void *cb_arg) 363 { 364 struct io_output *output = &g_io_output[g_io_output_index]; 365 struct spdk_bdev_io *child_io; 366 367 if (g_ignore_io_output) { 368 return 0; 369 } 370 371 if (g_bdev_io_submit_status == 0) { 372 set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET); 373 g_io_output_index++; 374 375 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 376 SPDK_CU_ASSERT_FATAL(child_io != NULL); 377 child_io_complete(child_io, cb, cb_arg); 378 } 379 380 return g_bdev_io_submit_status; 381 } 382 383 int 384 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 385 uint64_t offset_blocks, uint64_t num_blocks, 386 spdk_bdev_io_completion_cb cb, void *cb_arg) 387 { 388 struct io_output *output = &g_io_output[g_io_output_index]; 389 struct spdk_bdev_io *child_io; 390 391 if (g_ignore_io_output) { 392 return 0; 393 } 394 395 if (g_bdev_io_submit_status == 0) { 396 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 397 SPDK_BDEV_IO_TYPE_UNMAP); 398 g_io_output_index++; 399 400 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 401 SPDK_CU_ASSERT_FATAL(child_io != NULL); 402 child_io_complete(child_io, cb, cb_arg); 403 } 404 405 return g_bdev_io_submit_status; 406 } 407 408 void 409 spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno) 410 { 411 CU_ASSERT(bdeverrno == 0); 412 SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL); 413 bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno); 414 } 415 416 int 417 spdk_bdev_register(struct spdk_bdev *bdev) 418 { 419 TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link); 420 return 0; 421 } 422 423 void 424 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 425 { 426 int ret; 427 428 SPDK_CU_ASSERT_FATAL(spdk_bdev_get_by_name(bdev->name) == bdev); 429 TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); 430 431 bdev->internal.unregister_cb = cb_fn; 432 bdev->internal.unregister_ctx = cb_arg; 433 434 ret = bdev->fn_table->destruct(bdev->ctxt); 435 CU_ASSERT(ret == 1); 436 437 poll_threads(); 438 } 439 440 int 441 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb, 442 void *event_ctx, struct spdk_bdev_desc **_desc) 443 { 444 struct spdk_bdev *bdev; 445 446 bdev = spdk_bdev_get_by_name(bdev_name); 447 if (bdev == NULL) { 448 return -ENODEV; 449 } 450 451 *_desc = (void *)bdev; 452 return 0; 453 } 454 455 struct spdk_bdev * 456 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 457 { 458 return (void *)desc; 459 } 460 461 int 462 spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val) 463 { 464 if (!g_test_multi_raids) { 465 struct rpc_bdev_raid_create *req = g_rpc_req; 466 if (strcmp(name, "strip_size_kb") == 0) { 467 CU_ASSERT(req->strip_size_kb == val); 468 } else if (strcmp(name, "blocklen_shift") == 0) { 469 CU_ASSERT(spdk_u32log2(g_block_len) == val); 470 } else if (strcmp(name, "num_base_bdevs") == 0) { 471 CU_ASSERT(req->base_bdevs.num_base_bdevs == val); 472 } else if (strcmp(name, "state") == 0) { 473 CU_ASSERT(val == RAID_BDEV_STATE_ONLINE); 474 } else if (strcmp(name, "destruct_called") == 0) { 475 CU_ASSERT(val == 0); 476 } else if (strcmp(name, "num_base_bdevs_discovered") == 0) { 477 CU_ASSERT(req->base_bdevs.num_base_bdevs == val); 478 } 479 } 480 return 0; 481 } 482 483 int 484 spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val) 485 { 486 if (g_test_multi_raids) { 487 if (strcmp(name, "name") == 0) { 488 g_get_raids_output[g_get_raids_count] = strdup(val); 489 SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL); 490 g_get_raids_count++; 491 } 492 } else { 493 struct rpc_bdev_raid_create *req = g_rpc_req; 494 if (strcmp(name, "raid_level") == 0) { 495 CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0); 496 } 497 } 498 return 0; 499 } 500 501 int 502 spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val) 503 { 504 if (!g_test_multi_raids) { 505 struct rpc_bdev_raid_create *req = g_rpc_req; 506 if (strcmp(name, "superblock") == 0) { 507 CU_ASSERT(val == req->superblock_enabled); 508 } 509 } 510 return 0; 511 } 512 513 void 514 spdk_bdev_free_io(struct spdk_bdev_io *bdev_io) 515 { 516 if (bdev_io) { 517 free(bdev_io); 518 } 519 } 520 521 /* It will cache split IOs for verification */ 522 int 523 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 524 struct iovec *iov, int iovcnt, 525 uint64_t offset_blocks, uint64_t num_blocks, 526 spdk_bdev_io_completion_cb cb, void *cb_arg) 527 { 528 struct io_output *output = &g_io_output[g_io_output_index]; 529 struct spdk_bdev_io *child_io; 530 531 if (g_ignore_io_output) { 532 return 0; 533 } 534 535 SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1); 536 if (g_bdev_io_submit_status == 0) { 537 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 538 SPDK_BDEV_IO_TYPE_READ); 539 g_io_output_index++; 540 541 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 542 SPDK_CU_ASSERT_FATAL(child_io != NULL); 543 child_io_complete(child_io, cb, cb_arg); 544 } 545 546 return g_bdev_io_submit_status; 547 } 548 549 int 550 spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 551 struct iovec *iov, int iovcnt, 552 uint64_t offset_blocks, uint64_t num_blocks, 553 spdk_bdev_io_completion_cb cb, void *cb_arg, 554 struct spdk_bdev_ext_io_opts *opts) 555 { 556 return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg); 557 } 558 559 int 560 spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 561 struct iovec *iov, int iovcnt, void *md, 562 uint64_t offset_blocks, uint64_t num_blocks, 563 spdk_bdev_io_completion_cb cb, void *cb_arg) 564 { 565 return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg); 566 } 567 568 569 void 570 spdk_bdev_module_release_bdev(struct spdk_bdev *bdev) 571 { 572 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 573 CU_ASSERT(bdev->internal.claim.v1.module != NULL); 574 bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE; 575 bdev->internal.claim.v1.module = NULL; 576 } 577 578 int 579 spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 580 struct spdk_bdev_module *module) 581 { 582 if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) { 583 CU_ASSERT(bdev->internal.claim.v1.module != NULL); 584 return -1; 585 } 586 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 587 bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE; 588 bdev->internal.claim.v1.module = module; 589 return 0; 590 } 591 592 int 593 spdk_json_decode_object(const struct spdk_json_val *values, 594 const struct spdk_json_object_decoder *decoders, size_t num_decoders, 595 void *out) 596 { 597 struct rpc_bdev_raid_create *req, *_out; 598 size_t i; 599 600 if (g_json_decode_obj_err) { 601 return -1; 602 } else if (g_json_decode_obj_create) { 603 req = g_rpc_req; 604 _out = out; 605 606 _out->name = strdup(req->name); 607 SPDK_CU_ASSERT_FATAL(_out->name != NULL); 608 _out->strip_size_kb = req->strip_size_kb; 609 _out->level = req->level; 610 _out->superblock_enabled = req->superblock_enabled; 611 _out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs; 612 for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) { 613 _out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]); 614 SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]); 615 } 616 } else { 617 memcpy(out, g_rpc_req, g_rpc_req_size); 618 } 619 620 return 0; 621 } 622 623 struct spdk_json_write_ctx * 624 spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request) 625 { 626 return (void *)1; 627 } 628 629 void 630 spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request, 631 int error_code, const char *msg) 632 { 633 g_rpc_err = 1; 634 } 635 636 void 637 spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request, 638 int error_code, const char *fmt, ...) 639 { 640 g_rpc_err = 1; 641 } 642 643 struct spdk_bdev * 644 spdk_bdev_get_by_name(const char *bdev_name) 645 { 646 struct spdk_bdev *bdev; 647 648 if (!TAILQ_EMPTY(&g_bdev_list)) { 649 TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) { 650 if (strcmp(bdev_name, bdev->name) == 0) { 651 return bdev; 652 } 653 } 654 } 655 656 return NULL; 657 } 658 659 int 660 spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 661 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 662 { 663 if (cb_fn) { 664 cb_fn(cb_arg, 0); 665 } 666 667 return 0; 668 } 669 670 int 671 spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 672 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 673 { 674 if (cb_fn) { 675 cb_fn(cb_arg, 0); 676 } 677 678 return 0; 679 } 680 681 int 682 spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 683 uint64_t offset, uint64_t length, 684 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 685 { 686 if (cb_fn) { 687 cb_fn(cb_arg, 0); 688 } 689 690 return 0; 691 } 692 693 int 694 spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 695 uint64_t offset, uint64_t length, 696 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 697 { 698 if (cb_fn) { 699 cb_fn(cb_arg, 0); 700 } 701 702 return 0; 703 } 704 705 static void 706 bdev_io_cleanup(struct spdk_bdev_io *bdev_io) 707 { 708 if (bdev_io->u.bdev.iovs) { 709 int i; 710 711 for (i = 0; i < bdev_io->u.bdev.iovcnt; i++) { 712 free(bdev_io->u.bdev.iovs[i].iov_base); 713 } 714 free(bdev_io->u.bdev.iovs); 715 } 716 free(bdev_io); 717 } 718 719 static void 720 _bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, 721 struct spdk_bdev *bdev, uint64_t lba, uint64_t blocks, int16_t iotype, 722 int iovcnt, size_t iov_len) 723 { 724 struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch); 725 int i; 726 727 bdev_io->bdev = bdev; 728 bdev_io->u.bdev.offset_blocks = lba; 729 bdev_io->u.bdev.num_blocks = blocks; 730 bdev_io->type = iotype; 731 bdev_io->internal.ch = channel; 732 bdev_io->u.bdev.iovcnt = iovcnt; 733 734 if (iovcnt == 0) { 735 bdev_io->u.bdev.iovs = NULL; 736 return; 737 } 738 739 SPDK_CU_ASSERT_FATAL(iov_len * iovcnt == blocks * g_block_len); 740 741 bdev_io->u.bdev.iovs = calloc(iovcnt, sizeof(struct iovec)); 742 SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL); 743 744 for (i = 0; i < iovcnt; i++) { 745 struct iovec *iov = &bdev_io->u.bdev.iovs[i]; 746 747 iov->iov_base = calloc(1, iov_len); 748 SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL); 749 iov->iov_len = iov_len; 750 } 751 } 752 753 static void 754 bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev, 755 uint64_t lba, uint64_t blocks, int16_t iotype) 756 { 757 int iovcnt; 758 size_t iov_len; 759 760 if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) { 761 iovcnt = 0; 762 iov_len = 0; 763 } else { 764 iovcnt = 1; 765 iov_len = blocks * g_block_len; 766 } 767 768 _bdev_io_initialize(bdev_io, ch, bdev, lba, blocks, iotype, iovcnt, iov_len); 769 } 770 771 static void 772 verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 773 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status) 774 { 775 uint8_t index = 0; 776 struct io_output *output; 777 778 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 779 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 780 SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT); 781 SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL); 782 783 CU_ASSERT(g_io_output_index == num_base_drives); 784 for (index = 0; index < g_io_output_index; index++) { 785 output = &g_io_output[index]; 786 CU_ASSERT(ch_ctx->base_channel[index] == output->ch); 787 CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc); 788 CU_ASSERT(bdev_io->type == output->iotype); 789 } 790 CU_ASSERT(g_io_comp_status == io_status); 791 } 792 793 static void 794 verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 795 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status) 796 { 797 uint32_t strip_shift = spdk_u32log2(g_strip_size); 798 uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift; 799 uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >> 800 strip_shift; 801 uint32_t splits_reqd = (end_strip - start_strip + 1); 802 uint32_t strip; 803 uint64_t pd_strip; 804 uint8_t pd_idx; 805 uint32_t offset_in_strip; 806 uint64_t pd_lba; 807 uint64_t pd_blocks; 808 uint32_t index = 0; 809 struct io_output *output; 810 811 if (io_status == INVALID_IO_SUBMIT) { 812 CU_ASSERT(g_io_comp_status == false); 813 return; 814 } 815 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 816 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 817 818 CU_ASSERT(splits_reqd == g_io_output_index); 819 for (strip = start_strip; strip <= end_strip; strip++, index++) { 820 pd_strip = strip / num_base_drives; 821 pd_idx = strip % num_base_drives; 822 if (strip == start_strip) { 823 offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1); 824 pd_lba = (pd_strip << strip_shift) + offset_in_strip; 825 if (strip == end_strip) { 826 pd_blocks = bdev_io->u.bdev.num_blocks; 827 } else { 828 pd_blocks = g_strip_size - offset_in_strip; 829 } 830 } else if (strip == end_strip) { 831 pd_lba = pd_strip << strip_shift; 832 pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) & 833 (g_strip_size - 1)) + 1; 834 } else { 835 pd_lba = pd_strip << raid_bdev->strip_size_shift; 836 pd_blocks = raid_bdev->strip_size; 837 } 838 output = &g_io_output[index]; 839 CU_ASSERT(pd_lba == output->offset_blocks); 840 CU_ASSERT(pd_blocks == output->num_blocks); 841 CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch); 842 CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc); 843 CU_ASSERT(bdev_io->type == output->iotype); 844 } 845 CU_ASSERT(g_io_comp_status == io_status); 846 } 847 848 static void 849 verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 850 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, 851 uint32_t io_status) 852 { 853 uint32_t strip_shift = spdk_u32log2(g_strip_size); 854 uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size; 855 uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) % 856 g_strip_size; 857 uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift; 858 uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >> 859 strip_shift; 860 uint8_t n_disks_involved; 861 uint64_t start_strip_disk_idx; 862 uint64_t end_strip_disk_idx; 863 uint64_t nblocks_in_start_disk; 864 uint64_t offset_in_start_disk; 865 uint8_t disk_idx; 866 uint64_t base_io_idx; 867 uint64_t sum_nblocks = 0; 868 struct io_output *output; 869 870 if (io_status == INVALID_IO_SUBMIT) { 871 CU_ASSERT(g_io_comp_status == false); 872 return; 873 } 874 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 875 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 876 SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ); 877 SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE); 878 879 n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives); 880 CU_ASSERT(n_disks_involved == g_io_output_index); 881 882 start_strip_disk_idx = start_strip % num_base_drives; 883 end_strip_disk_idx = end_strip % num_base_drives; 884 885 offset_in_start_disk = g_io_output[0].offset_blocks; 886 nblocks_in_start_disk = g_io_output[0].num_blocks; 887 888 for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved; 889 base_io_idx++, disk_idx++) { 890 uint64_t start_offset_in_disk; 891 uint64_t end_offset_in_disk; 892 893 output = &g_io_output[base_io_idx]; 894 895 /* round disk_idx */ 896 if (disk_idx >= num_base_drives) { 897 disk_idx %= num_base_drives; 898 } 899 900 /* start_offset_in_disk aligned in strip check: 901 * The first base io has a same start_offset_in_strip with the whole raid io. 902 * Other base io should have aligned start_offset_in_strip which is 0. 903 */ 904 start_offset_in_disk = output->offset_blocks; 905 if (base_io_idx == 0) { 906 CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip); 907 } else { 908 CU_ASSERT(start_offset_in_disk % g_strip_size == 0); 909 } 910 911 /* end_offset_in_disk aligned in strip check: 912 * Base io on disk at which end_strip is located, has a same end_offset_in_strip 913 * with the whole raid io. 914 * Other base io should have aligned end_offset_in_strip. 915 */ 916 end_offset_in_disk = output->offset_blocks + output->num_blocks - 1; 917 if (disk_idx == end_strip_disk_idx) { 918 CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip); 919 } else { 920 CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1); 921 } 922 923 /* start_offset_in_disk compared with start_disk. 924 * 1. For disk_idx which is larger than start_strip_disk_idx: Its start_offset_in_disk 925 * mustn't be larger than the start offset of start_offset_in_disk; And the gap 926 * must be less than strip size. 927 * 2. For disk_idx which is less than start_strip_disk_idx, Its start_offset_in_disk 928 * must be larger than the start offset of start_offset_in_disk; And the gap mustn't 929 * be less than strip size. 930 */ 931 if (disk_idx > start_strip_disk_idx) { 932 CU_ASSERT(start_offset_in_disk <= offset_in_start_disk); 933 CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size); 934 } else if (disk_idx < start_strip_disk_idx) { 935 CU_ASSERT(start_offset_in_disk > offset_in_start_disk); 936 CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size); 937 } 938 939 /* nblocks compared with start_disk: 940 * The gap between them must be within a strip size. 941 */ 942 if (output->num_blocks <= nblocks_in_start_disk) { 943 CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size); 944 } else { 945 CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size); 946 } 947 948 sum_nblocks += output->num_blocks; 949 950 CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch); 951 CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc); 952 CU_ASSERT(bdev_io->type == output->iotype); 953 } 954 955 /* Sum of each nblocks should be same with raid bdev_io */ 956 CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks); 957 958 CU_ASSERT(g_io_comp_status == io_status); 959 } 960 961 static void 962 verify_raid_bdev_present(const char *name, bool presence) 963 { 964 struct raid_bdev *pbdev; 965 bool pbdev_found; 966 967 pbdev_found = false; 968 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 969 if (strcmp(pbdev->bdev.name, name) == 0) { 970 pbdev_found = true; 971 break; 972 } 973 } 974 if (presence == true) { 975 CU_ASSERT(pbdev_found == true); 976 } else { 977 CU_ASSERT(pbdev_found == false); 978 } 979 } 980 981 static void 982 verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state) 983 { 984 struct raid_bdev *pbdev; 985 struct raid_base_bdev_info *base_info; 986 struct spdk_bdev *bdev = NULL; 987 bool pbdev_found; 988 uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF; 989 990 pbdev_found = false; 991 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 992 if (strcmp(pbdev->bdev.name, r->name) == 0) { 993 pbdev_found = true; 994 if (presence == false) { 995 break; 996 } 997 CU_ASSERT(pbdev->base_bdev_info != NULL); 998 CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len)); 999 CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) / 1000 g_block_len))); 1001 CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len)); 1002 CU_ASSERT((uint32_t)pbdev->state == raid_state); 1003 CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs); 1004 CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs); 1005 CU_ASSERT(pbdev->level == r->level); 1006 CU_ASSERT(pbdev->base_bdev_info != NULL); 1007 RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) { 1008 CU_ASSERT(base_info->desc != NULL); 1009 bdev = spdk_bdev_desc_get_bdev(base_info->desc); 1010 CU_ASSERT(bdev != NULL); 1011 CU_ASSERT(base_info->remove_scheduled == false); 1012 CU_ASSERT((pbdev->sb != NULL && base_info->data_offset != 0) || 1013 (pbdev->sb == NULL && base_info->data_offset == 0)); 1014 CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt); 1015 1016 if (bdev && base_info->data_size < min_blockcnt) { 1017 min_blockcnt = base_info->data_size; 1018 } 1019 } 1020 CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) * 1021 (r->strip_size_kb * 1024 / g_block_len)) * 1022 r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt); 1023 CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0); 1024 CU_ASSERT(pbdev->bdev.write_cache == 0); 1025 CU_ASSERT(pbdev->bdev.blocklen == g_block_len); 1026 if (pbdev->num_base_bdevs > 1) { 1027 CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size); 1028 CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true); 1029 } else { 1030 CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0); 1031 CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false); 1032 } 1033 CU_ASSERT(pbdev->bdev.ctxt == pbdev); 1034 CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table); 1035 CU_ASSERT(pbdev->bdev.module == &g_raid_if); 1036 break; 1037 } 1038 } 1039 if (presence == true) { 1040 CU_ASSERT(pbdev_found == true); 1041 } else { 1042 CU_ASSERT(pbdev_found == false); 1043 } 1044 } 1045 1046 static void 1047 verify_get_raids(struct rpc_bdev_raid_create *construct_req, 1048 uint8_t g_max_raids, 1049 char **g_get_raids_output, uint32_t g_get_raids_count) 1050 { 1051 uint8_t i, j; 1052 bool found; 1053 1054 CU_ASSERT(g_max_raids == g_get_raids_count); 1055 if (g_max_raids == g_get_raids_count) { 1056 for (i = 0; i < g_max_raids; i++) { 1057 found = false; 1058 for (j = 0; j < g_max_raids; j++) { 1059 if (construct_req[i].name && 1060 strcmp(construct_req[i].name, g_get_raids_output[i]) == 0) { 1061 found = true; 1062 break; 1063 } 1064 } 1065 CU_ASSERT(found == true); 1066 } 1067 } 1068 } 1069 1070 static void 1071 create_base_bdevs(uint32_t bbdev_start_idx) 1072 { 1073 uint8_t i; 1074 struct spdk_bdev *base_bdev; 1075 char name[16]; 1076 1077 for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) { 1078 snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1"); 1079 base_bdev = calloc(1, sizeof(struct spdk_bdev)); 1080 SPDK_CU_ASSERT_FATAL(base_bdev != NULL); 1081 base_bdev->name = strdup(name); 1082 spdk_uuid_generate(&base_bdev->uuid); 1083 SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL); 1084 base_bdev->blocklen = g_block_len; 1085 base_bdev->blockcnt = BLOCK_CNT; 1086 TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link); 1087 } 1088 } 1089 1090 static void 1091 create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name, 1092 uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled) 1093 { 1094 uint8_t i; 1095 char name[16]; 1096 uint8_t bbdev_idx = bbdev_start_idx; 1097 1098 r->name = strdup(raid_name); 1099 SPDK_CU_ASSERT_FATAL(r->name != NULL); 1100 r->strip_size_kb = (g_strip_size * g_block_len) / 1024; 1101 r->level = RAID0; 1102 r->superblock_enabled = superblock_enabled; 1103 r->base_bdevs.num_base_bdevs = g_max_base_drives; 1104 for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) { 1105 snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1"); 1106 r->base_bdevs.base_bdevs[i] = strdup(name); 1107 SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL); 1108 } 1109 if (create_base_bdev == true) { 1110 create_base_bdevs(bbdev_start_idx); 1111 } 1112 g_rpc_req = r; 1113 g_rpc_req_size = sizeof(*r); 1114 } 1115 1116 static void 1117 create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name, 1118 uint8_t bbdev_start_idx, bool create_base_bdev, 1119 uint8_t json_decode_obj_err, bool superblock_enabled) 1120 { 1121 create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock_enabled); 1122 1123 g_rpc_err = 0; 1124 g_json_decode_obj_create = 1; 1125 g_json_decode_obj_err = json_decode_obj_err; 1126 g_config_level_create = 0; 1127 g_test_multi_raids = 0; 1128 } 1129 1130 static void 1131 free_test_req(struct rpc_bdev_raid_create *r) 1132 { 1133 uint8_t i; 1134 1135 free(r->name); 1136 for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) { 1137 free(r->base_bdevs.base_bdevs[i]); 1138 } 1139 } 1140 1141 static void 1142 create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name, 1143 uint8_t json_decode_obj_err) 1144 { 1145 r->name = strdup(raid_name); 1146 SPDK_CU_ASSERT_FATAL(r->name != NULL); 1147 1148 g_rpc_req = r; 1149 g_rpc_req_size = sizeof(*r); 1150 g_rpc_err = 0; 1151 g_json_decode_obj_create = 0; 1152 g_json_decode_obj_err = json_decode_obj_err; 1153 g_config_level_create = 0; 1154 g_test_multi_raids = 0; 1155 } 1156 1157 static void 1158 create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category, 1159 uint8_t json_decode_obj_err) 1160 { 1161 r->category = strdup(category); 1162 SPDK_CU_ASSERT_FATAL(r->category != NULL); 1163 1164 g_rpc_req = r; 1165 g_rpc_req_size = sizeof(*r); 1166 g_rpc_err = 0; 1167 g_json_decode_obj_create = 0; 1168 g_json_decode_obj_err = json_decode_obj_err; 1169 g_config_level_create = 0; 1170 g_test_multi_raids = 1; 1171 g_get_raids_count = 0; 1172 } 1173 1174 static void 1175 test_create_raid(void) 1176 { 1177 struct rpc_bdev_raid_create req; 1178 struct rpc_bdev_raid_delete delete_req; 1179 1180 set_globals(); 1181 CU_ASSERT(raid_bdev_init() == 0); 1182 1183 verify_raid_bdev_present("raid1", false); 1184 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1185 rpc_bdev_raid_create(NULL, NULL); 1186 CU_ASSERT(g_rpc_err == 0); 1187 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1188 free_test_req(&req); 1189 1190 create_raid_bdev_delete_req(&delete_req, "raid1", 0); 1191 rpc_bdev_raid_delete(NULL, NULL); 1192 CU_ASSERT(g_rpc_err == 0); 1193 raid_bdev_exit(); 1194 base_bdevs_cleanup(); 1195 reset_globals(); 1196 } 1197 1198 static void 1199 test_delete_raid(void) 1200 { 1201 struct rpc_bdev_raid_create construct_req; 1202 struct rpc_bdev_raid_delete delete_req; 1203 1204 set_globals(); 1205 CU_ASSERT(raid_bdev_init() == 0); 1206 1207 verify_raid_bdev_present("raid1", false); 1208 create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false); 1209 rpc_bdev_raid_create(NULL, NULL); 1210 CU_ASSERT(g_rpc_err == 0); 1211 verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE); 1212 free_test_req(&construct_req); 1213 1214 create_raid_bdev_delete_req(&delete_req, "raid1", 0); 1215 rpc_bdev_raid_delete(NULL, NULL); 1216 CU_ASSERT(g_rpc_err == 0); 1217 verify_raid_bdev_present("raid1", false); 1218 1219 raid_bdev_exit(); 1220 base_bdevs_cleanup(); 1221 reset_globals(); 1222 } 1223 1224 static void 1225 test_create_raid_invalid_args(void) 1226 { 1227 struct rpc_bdev_raid_create req; 1228 struct rpc_bdev_raid_delete destroy_req; 1229 struct raid_bdev *raid_bdev; 1230 1231 set_globals(); 1232 CU_ASSERT(raid_bdev_init() == 0); 1233 1234 verify_raid_bdev_present("raid1", false); 1235 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1236 req.level = INVALID_RAID_LEVEL; 1237 rpc_bdev_raid_create(NULL, NULL); 1238 CU_ASSERT(g_rpc_err == 1); 1239 free_test_req(&req); 1240 verify_raid_bdev_present("raid1", false); 1241 1242 create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false); 1243 rpc_bdev_raid_create(NULL, NULL); 1244 CU_ASSERT(g_rpc_err == 1); 1245 free_test_req(&req); 1246 verify_raid_bdev_present("raid1", false); 1247 1248 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1249 req.strip_size_kb = 1231; 1250 rpc_bdev_raid_create(NULL, NULL); 1251 CU_ASSERT(g_rpc_err == 1); 1252 free_test_req(&req); 1253 verify_raid_bdev_present("raid1", false); 1254 1255 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1256 rpc_bdev_raid_create(NULL, NULL); 1257 CU_ASSERT(g_rpc_err == 0); 1258 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1259 free_test_req(&req); 1260 1261 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1262 rpc_bdev_raid_create(NULL, NULL); 1263 CU_ASSERT(g_rpc_err == 1); 1264 free_test_req(&req); 1265 1266 create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false); 1267 rpc_bdev_raid_create(NULL, NULL); 1268 CU_ASSERT(g_rpc_err == 1); 1269 free_test_req(&req); 1270 verify_raid_bdev_present("raid2", false); 1271 1272 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false); 1273 free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]); 1274 req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1"); 1275 SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL); 1276 rpc_bdev_raid_create(NULL, NULL); 1277 CU_ASSERT(g_rpc_err == 1); 1278 free_test_req(&req); 1279 verify_raid_bdev_present("raid2", false); 1280 1281 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false); 1282 free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]); 1283 req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1"); 1284 SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL); 1285 rpc_bdev_raid_create(NULL, NULL); 1286 CU_ASSERT(g_rpc_err == 0); 1287 free_test_req(&req); 1288 verify_raid_bdev_present("raid2", true); 1289 raid_bdev = raid_bdev_find_by_name("raid2"); 1290 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 1291 check_and_remove_raid_bdev(raid_bdev); 1292 1293 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false); 1294 rpc_bdev_raid_create(NULL, NULL); 1295 CU_ASSERT(g_rpc_err == 0); 1296 free_test_req(&req); 1297 verify_raid_bdev_present("raid2", true); 1298 verify_raid_bdev_present("raid1", true); 1299 1300 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1301 rpc_bdev_raid_delete(NULL, NULL); 1302 create_raid_bdev_delete_req(&destroy_req, "raid2", 0); 1303 rpc_bdev_raid_delete(NULL, NULL); 1304 raid_bdev_exit(); 1305 base_bdevs_cleanup(); 1306 reset_globals(); 1307 } 1308 1309 static void 1310 test_delete_raid_invalid_args(void) 1311 { 1312 struct rpc_bdev_raid_create construct_req; 1313 struct rpc_bdev_raid_delete destroy_req; 1314 1315 set_globals(); 1316 CU_ASSERT(raid_bdev_init() == 0); 1317 1318 verify_raid_bdev_present("raid1", false); 1319 create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false); 1320 rpc_bdev_raid_create(NULL, NULL); 1321 CU_ASSERT(g_rpc_err == 0); 1322 verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE); 1323 free_test_req(&construct_req); 1324 1325 create_raid_bdev_delete_req(&destroy_req, "raid2", 0); 1326 rpc_bdev_raid_delete(NULL, NULL); 1327 CU_ASSERT(g_rpc_err == 1); 1328 1329 create_raid_bdev_delete_req(&destroy_req, "raid1", 1); 1330 rpc_bdev_raid_delete(NULL, NULL); 1331 CU_ASSERT(g_rpc_err == 1); 1332 free(destroy_req.name); 1333 verify_raid_bdev_present("raid1", true); 1334 1335 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1336 rpc_bdev_raid_delete(NULL, NULL); 1337 CU_ASSERT(g_rpc_err == 0); 1338 verify_raid_bdev_present("raid1", false); 1339 1340 raid_bdev_exit(); 1341 base_bdevs_cleanup(); 1342 reset_globals(); 1343 } 1344 1345 static void 1346 test_io_channel(void) 1347 { 1348 struct rpc_bdev_raid_create req; 1349 struct rpc_bdev_raid_delete destroy_req; 1350 struct raid_bdev *pbdev; 1351 struct spdk_io_channel *ch; 1352 struct raid_bdev_io_channel *ch_ctx; 1353 1354 set_globals(); 1355 CU_ASSERT(raid_bdev_init() == 0); 1356 1357 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1358 verify_raid_bdev_present("raid1", false); 1359 rpc_bdev_raid_create(NULL, NULL); 1360 CU_ASSERT(g_rpc_err == 0); 1361 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1362 1363 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1364 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1365 break; 1366 } 1367 } 1368 CU_ASSERT(pbdev != NULL); 1369 1370 ch = spdk_get_io_channel(pbdev); 1371 SPDK_CU_ASSERT_FATAL(ch != NULL); 1372 1373 ch_ctx = spdk_io_channel_get_ctx(ch); 1374 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1375 1376 free_test_req(&req); 1377 1378 spdk_put_io_channel(ch); 1379 1380 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1381 rpc_bdev_raid_delete(NULL, NULL); 1382 CU_ASSERT(g_rpc_err == 0); 1383 verify_raid_bdev_present("raid1", false); 1384 1385 raid_bdev_exit(); 1386 base_bdevs_cleanup(); 1387 reset_globals(); 1388 } 1389 1390 static void 1391 test_write_io(void) 1392 { 1393 struct rpc_bdev_raid_create req; 1394 struct rpc_bdev_raid_delete destroy_req; 1395 struct raid_bdev *pbdev; 1396 struct spdk_io_channel *ch; 1397 struct raid_bdev_io_channel *ch_ctx; 1398 uint8_t i; 1399 struct spdk_bdev_io *bdev_io; 1400 uint64_t io_len; 1401 uint64_t lba = 0; 1402 1403 set_globals(); 1404 CU_ASSERT(raid_bdev_init() == 0); 1405 1406 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1407 verify_raid_bdev_present("raid1", false); 1408 rpc_bdev_raid_create(NULL, NULL); 1409 CU_ASSERT(g_rpc_err == 0); 1410 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1411 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1412 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1413 break; 1414 } 1415 } 1416 CU_ASSERT(pbdev != NULL); 1417 1418 ch = spdk_get_io_channel(pbdev); 1419 SPDK_CU_ASSERT_FATAL(ch != NULL); 1420 1421 ch_ctx = spdk_io_channel_get_ctx(ch); 1422 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1423 1424 /* test 2 IO sizes based on global strip size set earlier */ 1425 for (i = 0; i < 2; i++) { 1426 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1427 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1428 io_len = (g_strip_size / 2) << i; 1429 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE); 1430 lba += g_strip_size; 1431 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1432 g_io_output_index = 0; 1433 raid_bdev_submit_request(ch, bdev_io); 1434 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1435 g_child_io_status_flag); 1436 bdev_io_cleanup(bdev_io); 1437 } 1438 1439 free_test_req(&req); 1440 spdk_put_io_channel(ch); 1441 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1442 rpc_bdev_raid_delete(NULL, NULL); 1443 CU_ASSERT(g_rpc_err == 0); 1444 verify_raid_bdev_present("raid1", false); 1445 1446 raid_bdev_exit(); 1447 base_bdevs_cleanup(); 1448 reset_globals(); 1449 } 1450 1451 static void 1452 test_read_io(void) 1453 { 1454 struct rpc_bdev_raid_create req; 1455 struct rpc_bdev_raid_delete destroy_req; 1456 struct raid_bdev *pbdev; 1457 struct spdk_io_channel *ch; 1458 struct raid_bdev_io_channel *ch_ctx; 1459 uint8_t i; 1460 struct spdk_bdev_io *bdev_io; 1461 uint64_t io_len; 1462 uint64_t lba; 1463 1464 set_globals(); 1465 CU_ASSERT(raid_bdev_init() == 0); 1466 1467 verify_raid_bdev_present("raid1", false); 1468 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1469 rpc_bdev_raid_create(NULL, NULL); 1470 CU_ASSERT(g_rpc_err == 0); 1471 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1472 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1473 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1474 break; 1475 } 1476 } 1477 CU_ASSERT(pbdev != NULL); 1478 1479 ch = spdk_get_io_channel(pbdev); 1480 SPDK_CU_ASSERT_FATAL(ch != NULL); 1481 1482 ch_ctx = spdk_io_channel_get_ctx(ch); 1483 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1484 1485 /* test 2 IO sizes based on global strip size set earlier */ 1486 lba = 0; 1487 for (i = 0; i < 2; i++) { 1488 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1489 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1490 io_len = (g_strip_size / 2) << i; 1491 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ); 1492 lba += g_strip_size; 1493 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1494 g_io_output_index = 0; 1495 raid_bdev_submit_request(ch, bdev_io); 1496 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1497 g_child_io_status_flag); 1498 bdev_io_cleanup(bdev_io); 1499 } 1500 1501 free_test_req(&req); 1502 spdk_put_io_channel(ch); 1503 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1504 rpc_bdev_raid_delete(NULL, NULL); 1505 CU_ASSERT(g_rpc_err == 0); 1506 verify_raid_bdev_present("raid1", false); 1507 1508 raid_bdev_exit(); 1509 base_bdevs_cleanup(); 1510 reset_globals(); 1511 } 1512 1513 static void 1514 raid_bdev_io_generate_by_strips(uint64_t n_strips) 1515 { 1516 uint64_t lba; 1517 uint64_t nblocks; 1518 uint64_t start_offset; 1519 uint64_t end_offset; 1520 uint64_t offsets_in_strip[3]; 1521 uint64_t start_bdev_idx; 1522 uint64_t start_bdev_offset; 1523 uint64_t start_bdev_idxs[3]; 1524 int i, j, l; 1525 1526 /* 3 different situations of offset in strip */ 1527 offsets_in_strip[0] = 0; 1528 offsets_in_strip[1] = g_strip_size >> 1; 1529 offsets_in_strip[2] = g_strip_size - 1; 1530 1531 /* 3 different situations of start_bdev_idx */ 1532 start_bdev_idxs[0] = 0; 1533 start_bdev_idxs[1] = g_max_base_drives >> 1; 1534 start_bdev_idxs[2] = g_max_base_drives - 1; 1535 1536 /* consider different offset in strip */ 1537 for (i = 0; i < 3; i++) { 1538 start_offset = offsets_in_strip[i]; 1539 for (j = 0; j < 3; j++) { 1540 end_offset = offsets_in_strip[j]; 1541 if (n_strips == 1 && start_offset > end_offset) { 1542 continue; 1543 } 1544 1545 /* consider at which base_bdev lba is started. */ 1546 for (l = 0; l < 3; l++) { 1547 start_bdev_idx = start_bdev_idxs[l]; 1548 start_bdev_offset = start_bdev_idx * g_strip_size; 1549 lba = g_lba_offset + start_bdev_offset + start_offset; 1550 nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1; 1551 1552 g_io_ranges[g_io_range_idx].lba = lba; 1553 g_io_ranges[g_io_range_idx].nblocks = nblocks; 1554 1555 SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE); 1556 g_io_range_idx++; 1557 } 1558 } 1559 } 1560 } 1561 1562 static void 1563 raid_bdev_io_generate(void) 1564 { 1565 uint64_t n_strips; 1566 uint64_t n_strips_span = g_max_base_drives; 1567 uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1, 1568 g_max_base_drives * 2, g_max_base_drives * 3, 1569 g_max_base_drives * 4 1570 }; 1571 uint32_t i; 1572 1573 g_io_range_idx = 0; 1574 1575 /* consider different number of strips from 1 to strips spanned base bdevs, 1576 * and even to times of strips spanned base bdevs 1577 */ 1578 for (n_strips = 1; n_strips < n_strips_span; n_strips++) { 1579 raid_bdev_io_generate_by_strips(n_strips); 1580 } 1581 1582 for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) { 1583 n_strips = n_strips_times[i]; 1584 raid_bdev_io_generate_by_strips(n_strips); 1585 } 1586 } 1587 1588 static void 1589 test_unmap_io(void) 1590 { 1591 struct rpc_bdev_raid_create req; 1592 struct rpc_bdev_raid_delete destroy_req; 1593 struct raid_bdev *pbdev; 1594 struct spdk_io_channel *ch; 1595 struct raid_bdev_io_channel *ch_ctx; 1596 struct spdk_bdev_io *bdev_io; 1597 uint32_t count; 1598 uint64_t io_len; 1599 uint64_t lba; 1600 1601 set_globals(); 1602 CU_ASSERT(raid_bdev_init() == 0); 1603 1604 verify_raid_bdev_present("raid1", false); 1605 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1606 rpc_bdev_raid_create(NULL, NULL); 1607 CU_ASSERT(g_rpc_err == 0); 1608 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1609 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1610 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1611 break; 1612 } 1613 } 1614 CU_ASSERT(pbdev != NULL); 1615 1616 ch = spdk_get_io_channel(pbdev); 1617 SPDK_CU_ASSERT_FATAL(ch != NULL); 1618 1619 ch_ctx = spdk_io_channel_get_ctx(ch); 1620 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1621 1622 CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true); 1623 CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true); 1624 1625 raid_bdev_io_generate(); 1626 for (count = 0; count < g_io_range_idx; count++) { 1627 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1628 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1629 io_len = g_io_ranges[count].nblocks; 1630 lba = g_io_ranges[count].lba; 1631 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP); 1632 memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output)); 1633 g_io_output_index = 0; 1634 raid_bdev_submit_request(ch, bdev_io); 1635 verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1636 g_child_io_status_flag); 1637 bdev_io_cleanup(bdev_io); 1638 } 1639 1640 free_test_req(&req); 1641 spdk_put_io_channel(ch); 1642 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1643 rpc_bdev_raid_delete(NULL, NULL); 1644 CU_ASSERT(g_rpc_err == 0); 1645 verify_raid_bdev_present("raid1", false); 1646 1647 raid_bdev_exit(); 1648 base_bdevs_cleanup(); 1649 reset_globals(); 1650 } 1651 1652 /* Test IO failures */ 1653 static void 1654 test_io_failure(void) 1655 { 1656 struct rpc_bdev_raid_create req; 1657 struct rpc_bdev_raid_delete destroy_req; 1658 struct raid_bdev *pbdev; 1659 struct spdk_io_channel *ch; 1660 struct raid_bdev_io_channel *ch_ctx; 1661 struct spdk_bdev_io *bdev_io; 1662 uint32_t count; 1663 uint64_t io_len; 1664 uint64_t lba; 1665 1666 set_globals(); 1667 CU_ASSERT(raid_bdev_init() == 0); 1668 1669 verify_raid_bdev_present("raid1", false); 1670 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1671 rpc_bdev_raid_create(NULL, NULL); 1672 CU_ASSERT(g_rpc_err == 0); 1673 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1674 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1675 if (strcmp(pbdev->bdev.name, req.name) == 0) { 1676 break; 1677 } 1678 } 1679 CU_ASSERT(pbdev != NULL); 1680 1681 ch = spdk_get_io_channel(pbdev); 1682 SPDK_CU_ASSERT_FATAL(ch != NULL); 1683 1684 ch_ctx = spdk_io_channel_get_ctx(ch); 1685 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1686 1687 lba = 0; 1688 for (count = 0; count < 1; count++) { 1689 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1690 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1691 io_len = (g_strip_size / 2) << count; 1692 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID); 1693 lba += g_strip_size; 1694 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1695 g_io_output_index = 0; 1696 raid_bdev_submit_request(ch, bdev_io); 1697 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1698 INVALID_IO_SUBMIT); 1699 bdev_io_cleanup(bdev_io); 1700 } 1701 1702 1703 lba = 0; 1704 g_child_io_status_flag = false; 1705 for (count = 0; count < 1; count++) { 1706 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1707 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1708 io_len = (g_strip_size / 2) << count; 1709 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE); 1710 lba += g_strip_size; 1711 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1712 g_io_output_index = 0; 1713 raid_bdev_submit_request(ch, bdev_io); 1714 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1715 g_child_io_status_flag); 1716 bdev_io_cleanup(bdev_io); 1717 } 1718 1719 free_test_req(&req); 1720 spdk_put_io_channel(ch); 1721 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1722 rpc_bdev_raid_delete(NULL, NULL); 1723 CU_ASSERT(g_rpc_err == 0); 1724 verify_raid_bdev_present("raid1", false); 1725 1726 raid_bdev_exit(); 1727 base_bdevs_cleanup(); 1728 reset_globals(); 1729 } 1730 1731 /* Test reset IO */ 1732 static void 1733 test_reset_io(void) 1734 { 1735 struct rpc_bdev_raid_create req; 1736 struct rpc_bdev_raid_delete destroy_req; 1737 struct raid_bdev *pbdev; 1738 struct spdk_io_channel *ch; 1739 struct raid_bdev_io_channel *ch_ctx; 1740 struct spdk_bdev_io *bdev_io; 1741 1742 set_globals(); 1743 CU_ASSERT(raid_bdev_init() == 0); 1744 1745 verify_raid_bdev_present("raid1", false); 1746 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1747 rpc_bdev_raid_create(NULL, NULL); 1748 CU_ASSERT(g_rpc_err == 0); 1749 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1750 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1751 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1752 break; 1753 } 1754 } 1755 CU_ASSERT(pbdev != NULL); 1756 1757 ch = spdk_get_io_channel(pbdev); 1758 SPDK_CU_ASSERT_FATAL(ch != NULL); 1759 1760 ch_ctx = spdk_io_channel_get_ctx(ch); 1761 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1762 1763 g_bdev_io_submit_status = 0; 1764 g_child_io_status_flag = true; 1765 1766 CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true); 1767 1768 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1769 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1770 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET); 1771 memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output)); 1772 g_io_output_index = 0; 1773 raid_bdev_submit_request(ch, bdev_io); 1774 verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1775 true); 1776 bdev_io_cleanup(bdev_io); 1777 1778 free_test_req(&req); 1779 spdk_put_io_channel(ch); 1780 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1781 rpc_bdev_raid_delete(NULL, NULL); 1782 CU_ASSERT(g_rpc_err == 0); 1783 verify_raid_bdev_present("raid1", false); 1784 1785 raid_bdev_exit(); 1786 base_bdevs_cleanup(); 1787 reset_globals(); 1788 } 1789 1790 /* Create multiple raids, destroy raids without IO, get_raids related tests */ 1791 static void 1792 test_multi_raid_no_io(void) 1793 { 1794 struct rpc_bdev_raid_create *construct_req; 1795 struct rpc_bdev_raid_delete destroy_req; 1796 struct rpc_bdev_raid_get_bdevs get_raids_req; 1797 uint8_t i; 1798 char name[16]; 1799 uint8_t bbdev_idx = 0; 1800 1801 set_globals(); 1802 construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create)); 1803 SPDK_CU_ASSERT_FATAL(construct_req != NULL); 1804 CU_ASSERT(raid_bdev_init() == 0); 1805 for (i = 0; i < g_max_raids; i++) { 1806 snprintf(name, 16, "%s%u", "raid", i); 1807 verify_raid_bdev_present(name, false); 1808 create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false); 1809 bbdev_idx += g_max_base_drives; 1810 rpc_bdev_raid_create(NULL, NULL); 1811 CU_ASSERT(g_rpc_err == 0); 1812 verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE); 1813 } 1814 1815 create_get_raids_req(&get_raids_req, "all", 0); 1816 rpc_bdev_raid_get_bdevs(NULL, NULL); 1817 CU_ASSERT(g_rpc_err == 0); 1818 verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count); 1819 for (i = 0; i < g_get_raids_count; i++) { 1820 free(g_get_raids_output[i]); 1821 } 1822 1823 create_get_raids_req(&get_raids_req, "online", 0); 1824 rpc_bdev_raid_get_bdevs(NULL, NULL); 1825 CU_ASSERT(g_rpc_err == 0); 1826 verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count); 1827 for (i = 0; i < g_get_raids_count; i++) { 1828 free(g_get_raids_output[i]); 1829 } 1830 1831 create_get_raids_req(&get_raids_req, "configuring", 0); 1832 rpc_bdev_raid_get_bdevs(NULL, NULL); 1833 CU_ASSERT(g_rpc_err == 0); 1834 CU_ASSERT(g_get_raids_count == 0); 1835 1836 create_get_raids_req(&get_raids_req, "offline", 0); 1837 rpc_bdev_raid_get_bdevs(NULL, NULL); 1838 CU_ASSERT(g_rpc_err == 0); 1839 CU_ASSERT(g_get_raids_count == 0); 1840 1841 create_get_raids_req(&get_raids_req, "invalid_category", 0); 1842 rpc_bdev_raid_get_bdevs(NULL, NULL); 1843 CU_ASSERT(g_rpc_err == 1); 1844 CU_ASSERT(g_get_raids_count == 0); 1845 1846 create_get_raids_req(&get_raids_req, "all", 1); 1847 rpc_bdev_raid_get_bdevs(NULL, NULL); 1848 CU_ASSERT(g_rpc_err == 1); 1849 free(get_raids_req.category); 1850 CU_ASSERT(g_get_raids_count == 0); 1851 1852 create_get_raids_req(&get_raids_req, "all", 0); 1853 rpc_bdev_raid_get_bdevs(NULL, NULL); 1854 CU_ASSERT(g_rpc_err == 0); 1855 CU_ASSERT(g_get_raids_count == g_max_raids); 1856 for (i = 0; i < g_get_raids_count; i++) { 1857 free(g_get_raids_output[i]); 1858 } 1859 1860 for (i = 0; i < g_max_raids; i++) { 1861 SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL); 1862 snprintf(name, 16, "%s", construct_req[i].name); 1863 create_raid_bdev_delete_req(&destroy_req, name, 0); 1864 rpc_bdev_raid_delete(NULL, NULL); 1865 CU_ASSERT(g_rpc_err == 0); 1866 verify_raid_bdev_present(name, false); 1867 } 1868 raid_bdev_exit(); 1869 for (i = 0; i < g_max_raids; i++) { 1870 free_test_req(&construct_req[i]); 1871 } 1872 free(construct_req); 1873 base_bdevs_cleanup(); 1874 reset_globals(); 1875 } 1876 1877 /* Create multiple raids, fire IOs on raids */ 1878 static void 1879 test_multi_raid_with_io(void) 1880 { 1881 struct rpc_bdev_raid_create *construct_req; 1882 struct rpc_bdev_raid_delete destroy_req; 1883 uint8_t i; 1884 char name[16]; 1885 uint8_t bbdev_idx = 0; 1886 struct raid_bdev *pbdev; 1887 struct spdk_io_channel **channels; 1888 struct spdk_bdev_io *bdev_io; 1889 uint64_t io_len; 1890 uint64_t lba = 0; 1891 int16_t iotype; 1892 1893 set_globals(); 1894 construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create)); 1895 SPDK_CU_ASSERT_FATAL(construct_req != NULL); 1896 CU_ASSERT(raid_bdev_init() == 0); 1897 channels = calloc(g_max_raids, sizeof(*channels)); 1898 SPDK_CU_ASSERT_FATAL(channels != NULL); 1899 1900 for (i = 0; i < g_max_raids; i++) { 1901 snprintf(name, 16, "%s%u", "raid", i); 1902 verify_raid_bdev_present(name, false); 1903 create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false); 1904 bbdev_idx += g_max_base_drives; 1905 rpc_bdev_raid_create(NULL, NULL); 1906 CU_ASSERT(g_rpc_err == 0); 1907 verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE); 1908 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1909 if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) { 1910 break; 1911 } 1912 } 1913 CU_ASSERT(pbdev != NULL); 1914 1915 channels[i] = spdk_get_io_channel(pbdev); 1916 SPDK_CU_ASSERT_FATAL(channels[i] != NULL); 1917 } 1918 1919 /* This will perform a write on the first raid and a read on the second. It can be 1920 * expanded in the future to perform r/w on each raid device in the event that 1921 * multiple raid levels are supported. 1922 */ 1923 for (i = 0; i < g_max_raids; i++) { 1924 struct spdk_io_channel *ch = channels[i]; 1925 struct raid_bdev_io_channel *ch_ctx = spdk_io_channel_get_ctx(ch); 1926 1927 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1928 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1929 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1930 io_len = g_strip_size; 1931 iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ; 1932 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1933 g_io_output_index = 0; 1934 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1935 if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) { 1936 break; 1937 } 1938 } 1939 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, iotype); 1940 CU_ASSERT(pbdev != NULL); 1941 raid_bdev_submit_request(ch, bdev_io); 1942 verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev, 1943 g_child_io_status_flag); 1944 bdev_io_cleanup(bdev_io); 1945 } 1946 1947 for (i = 0; i < g_max_raids; i++) { 1948 spdk_put_io_channel(channels[i]); 1949 snprintf(name, 16, "%s", construct_req[i].name); 1950 create_raid_bdev_delete_req(&destroy_req, name, 0); 1951 rpc_bdev_raid_delete(NULL, NULL); 1952 CU_ASSERT(g_rpc_err == 0); 1953 verify_raid_bdev_present(name, false); 1954 } 1955 raid_bdev_exit(); 1956 for (i = 0; i < g_max_raids; i++) { 1957 free_test_req(&construct_req[i]); 1958 } 1959 free(construct_req); 1960 free(channels); 1961 base_bdevs_cleanup(); 1962 reset_globals(); 1963 } 1964 1965 static void 1966 test_io_type_supported(void) 1967 { 1968 CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true); 1969 CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true); 1970 CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false); 1971 } 1972 1973 static void 1974 test_raid_json_dump_info(void) 1975 { 1976 struct rpc_bdev_raid_create req; 1977 struct rpc_bdev_raid_delete destroy_req; 1978 struct raid_bdev *pbdev; 1979 1980 set_globals(); 1981 CU_ASSERT(raid_bdev_init() == 0); 1982 1983 verify_raid_bdev_present("raid1", false); 1984 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1985 rpc_bdev_raid_create(NULL, NULL); 1986 CU_ASSERT(g_rpc_err == 0); 1987 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1988 1989 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1990 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1991 break; 1992 } 1993 } 1994 CU_ASSERT(pbdev != NULL); 1995 1996 CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0); 1997 1998 free_test_req(&req); 1999 2000 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 2001 rpc_bdev_raid_delete(NULL, NULL); 2002 CU_ASSERT(g_rpc_err == 0); 2003 verify_raid_bdev_present("raid1", false); 2004 2005 raid_bdev_exit(); 2006 base_bdevs_cleanup(); 2007 reset_globals(); 2008 } 2009 2010 static void 2011 test_context_size(void) 2012 { 2013 CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io)); 2014 } 2015 2016 static void 2017 test_raid_level_conversions(void) 2018 { 2019 const char *raid_str; 2020 2021 CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL); 2022 CU_ASSERT(raid_bdev_str_to_level("0") == RAID0); 2023 CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0); 2024 CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0); 2025 2026 raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL); 2027 CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0); 2028 raid_str = raid_bdev_level_to_str(1234); 2029 CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0); 2030 raid_str = raid_bdev_level_to_str(RAID0); 2031 CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0); 2032 } 2033 2034 static void 2035 test_create_raid_superblock(void) 2036 { 2037 struct rpc_bdev_raid_create req; 2038 struct rpc_bdev_raid_delete delete_req; 2039 2040 set_globals(); 2041 CU_ASSERT(raid_bdev_init() == 0); 2042 2043 verify_raid_bdev_present("raid1", false); 2044 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true); 2045 rpc_bdev_raid_create(NULL, NULL); 2046 CU_ASSERT(g_rpc_err == 0); 2047 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 2048 free_test_req(&req); 2049 2050 create_raid_bdev_delete_req(&delete_req, "raid1", 0); 2051 rpc_bdev_raid_delete(NULL, NULL); 2052 CU_ASSERT(g_rpc_err == 0); 2053 raid_bdev_exit(); 2054 base_bdevs_cleanup(); 2055 reset_globals(); 2056 } 2057 2058 static void 2059 complete_process_request(void *ctx) 2060 { 2061 struct raid_bdev_process_request *process_req = ctx; 2062 2063 raid_bdev_process_request_complete(process_req, 0); 2064 } 2065 2066 static int 2067 submit_process_request(struct raid_bdev_process_request *process_req, 2068 struct raid_bdev_io_channel *raid_ch) 2069 { 2070 struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(raid_ch)); 2071 2072 *(uint64_t *)raid_bdev->module_private += process_req->num_blocks; 2073 2074 spdk_thread_send_msg(spdk_get_thread(), complete_process_request, process_req); 2075 2076 return process_req->num_blocks; 2077 } 2078 2079 static void 2080 test_raid_process(void) 2081 { 2082 struct rpc_bdev_raid_create req; 2083 struct rpc_bdev_raid_delete destroy_req; 2084 struct raid_bdev *pbdev; 2085 struct spdk_bdev *base_bdev; 2086 struct spdk_thread *process_thread; 2087 uint64_t num_blocks_processed = 0; 2088 2089 set_globals(); 2090 CU_ASSERT(raid_bdev_init() == 0); 2091 2092 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 2093 verify_raid_bdev_present("raid1", false); 2094 TAILQ_FOREACH(base_bdev, &g_bdev_list, internal.link) { 2095 base_bdev->blockcnt = 128; 2096 } 2097 rpc_bdev_raid_create(NULL, NULL); 2098 CU_ASSERT(g_rpc_err == 0); 2099 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 2100 free_test_req(&req); 2101 2102 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 2103 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 2104 break; 2105 } 2106 } 2107 CU_ASSERT(pbdev != NULL); 2108 2109 pbdev->module->submit_process_request = submit_process_request; 2110 pbdev->module_private = &num_blocks_processed; 2111 2112 CU_ASSERT(raid_bdev_start_rebuild(&pbdev->base_bdev_info[0]) == 0); 2113 poll_threads(); 2114 2115 SPDK_CU_ASSERT_FATAL(pbdev->process != NULL); 2116 2117 process_thread = spdk_thread_get_by_id(spdk_thread_get_id(spdk_get_thread()) + 1); 2118 2119 while (spdk_thread_poll(process_thread, 0, 0) > 0) { 2120 poll_threads(); 2121 } 2122 2123 CU_ASSERT(pbdev->process == NULL); 2124 CU_ASSERT(num_blocks_processed == pbdev->bdev.blockcnt); 2125 2126 poll_threads(); 2127 2128 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 2129 rpc_bdev_raid_delete(NULL, NULL); 2130 CU_ASSERT(g_rpc_err == 0); 2131 verify_raid_bdev_present("raid1", false); 2132 2133 raid_bdev_exit(); 2134 base_bdevs_cleanup(); 2135 reset_globals(); 2136 } 2137 2138 static void 2139 test_raid_io_split(void) 2140 { 2141 struct rpc_bdev_raid_create req; 2142 struct rpc_bdev_raid_delete destroy_req; 2143 struct raid_bdev *pbdev; 2144 struct spdk_io_channel *ch; 2145 struct raid_bdev_io_channel *raid_ch; 2146 struct spdk_bdev_io *bdev_io; 2147 struct raid_bdev_io *raid_io; 2148 uint64_t split_offset; 2149 struct iovec iovs_orig[4]; 2150 struct raid_bdev_process process = { }; 2151 2152 set_globals(); 2153 CU_ASSERT(raid_bdev_init() == 0); 2154 2155 verify_raid_bdev_present("raid1", false); 2156 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 2157 rpc_bdev_raid_create(NULL, NULL); 2158 CU_ASSERT(g_rpc_err == 0); 2159 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 2160 2161 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 2162 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 2163 break; 2164 } 2165 } 2166 CU_ASSERT(pbdev != NULL); 2167 pbdev->bdev.md_len = 8; 2168 2169 process.raid_bdev = pbdev; 2170 process.target = &pbdev->base_bdev_info[0]; 2171 pbdev->process = &process; 2172 ch = spdk_get_io_channel(pbdev); 2173 SPDK_CU_ASSERT_FATAL(ch != NULL); 2174 raid_ch = spdk_io_channel_get_ctx(ch); 2175 g_bdev_io_defer_completion = true; 2176 2177 /* test split of bdev_io with 1 iovec */ 2178 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 2179 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 2180 raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx; 2181 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE); 2182 memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt); 2183 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 2184 bdev_io->u.bdev.md_buf = (void *)0x1000000; 2185 g_io_output_index = 0; 2186 2187 split_offset = 1; 2188 raid_ch->process.offset = split_offset; 2189 raid_bdev_submit_request(ch, bdev_io); 2190 CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset); 2191 CU_ASSERT(raid_io->offset_blocks == split_offset); 2192 CU_ASSERT(raid_io->iovcnt == 1); 2193 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2194 CU_ASSERT(raid_io->iovs == raid_io->split.iov); 2195 CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base + split_offset * g_block_len); 2196 CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len - split_offset * g_block_len); 2197 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len); 2198 complete_deferred_ios(); 2199 CU_ASSERT(raid_io->num_blocks == split_offset); 2200 CU_ASSERT(raid_io->offset_blocks == 0); 2201 CU_ASSERT(raid_io->iovcnt == 1); 2202 CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base); 2203 CU_ASSERT(raid_io->iovs[0].iov_len == split_offset * g_block_len); 2204 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2205 complete_deferred_ios(); 2206 CU_ASSERT(raid_io->num_blocks == g_strip_size); 2207 CU_ASSERT(raid_io->offset_blocks == 0); 2208 CU_ASSERT(raid_io->iovcnt == 1); 2209 CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base); 2210 CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len); 2211 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2212 2213 CU_ASSERT(g_io_comp_status == g_child_io_status_flag); 2214 CU_ASSERT(g_io_output_index == 2); 2215 CU_ASSERT(g_io_output[0].offset_blocks == split_offset); 2216 CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset); 2217 CU_ASSERT(g_io_output[1].offset_blocks == 0); 2218 CU_ASSERT(g_io_output[1].num_blocks == split_offset); 2219 bdev_io_cleanup(bdev_io); 2220 2221 /* test split of bdev_io with 4 iovecs */ 2222 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 2223 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 2224 raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx; 2225 _bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE, 2226 4, g_strip_size / 4 * g_block_len); 2227 memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt); 2228 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 2229 bdev_io->u.bdev.md_buf = (void *)0x1000000; 2230 g_io_output_index = 0; 2231 2232 split_offset = 1; /* split at the first iovec */ 2233 raid_ch->process.offset = split_offset; 2234 raid_bdev_submit_request(ch, bdev_io); 2235 CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset); 2236 CU_ASSERT(raid_io->offset_blocks == split_offset); 2237 CU_ASSERT(raid_io->iovcnt == 4); 2238 CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[0]); 2239 CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[0]); 2240 CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base + g_block_len); 2241 CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[0].iov_len - g_block_len); 2242 CU_ASSERT(memcmp(raid_io->iovs + 1, iovs_orig + 1, sizeof(*iovs_orig) * 3) == 0); 2243 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len); 2244 complete_deferred_ios(); 2245 CU_ASSERT(raid_io->num_blocks == split_offset); 2246 CU_ASSERT(raid_io->offset_blocks == 0); 2247 CU_ASSERT(raid_io->iovcnt == 1); 2248 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2249 CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base); 2250 CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len); 2251 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2252 complete_deferred_ios(); 2253 CU_ASSERT(raid_io->num_blocks == g_strip_size); 2254 CU_ASSERT(raid_io->offset_blocks == 0); 2255 CU_ASSERT(raid_io->iovcnt == 4); 2256 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2257 CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0); 2258 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2259 2260 CU_ASSERT(g_io_comp_status == g_child_io_status_flag); 2261 CU_ASSERT(g_io_output_index == 2); 2262 CU_ASSERT(g_io_output[0].offset_blocks == split_offset); 2263 CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset); 2264 CU_ASSERT(g_io_output[1].offset_blocks == 0); 2265 CU_ASSERT(g_io_output[1].num_blocks == split_offset); 2266 2267 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 2268 g_io_output_index = 0; 2269 2270 split_offset = g_strip_size / 2; /* split exactly between second and third iovec */ 2271 raid_ch->process.offset = split_offset; 2272 raid_bdev_submit_request(ch, bdev_io); 2273 CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset); 2274 CU_ASSERT(raid_io->offset_blocks == split_offset); 2275 CU_ASSERT(raid_io->iovcnt == 2); 2276 CU_ASSERT(raid_io->split.iov == NULL); 2277 CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]); 2278 CU_ASSERT(memcmp(raid_io->iovs, iovs_orig + 2, sizeof(*iovs_orig) * raid_io->iovcnt) == 0); 2279 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len); 2280 complete_deferred_ios(); 2281 CU_ASSERT(raid_io->num_blocks == split_offset); 2282 CU_ASSERT(raid_io->offset_blocks == 0); 2283 CU_ASSERT(raid_io->iovcnt == 2); 2284 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2285 CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0); 2286 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2287 complete_deferred_ios(); 2288 CU_ASSERT(raid_io->num_blocks == g_strip_size); 2289 CU_ASSERT(raid_io->offset_blocks == 0); 2290 CU_ASSERT(raid_io->iovcnt == 4); 2291 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2292 CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0); 2293 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2294 2295 CU_ASSERT(g_io_comp_status == g_child_io_status_flag); 2296 CU_ASSERT(g_io_output_index == 2); 2297 CU_ASSERT(g_io_output[0].offset_blocks == split_offset); 2298 CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset); 2299 CU_ASSERT(g_io_output[1].offset_blocks == 0); 2300 CU_ASSERT(g_io_output[1].num_blocks == split_offset); 2301 2302 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 2303 g_io_output_index = 0; 2304 2305 split_offset = g_strip_size / 2 + 1; /* split at the third iovec */ 2306 raid_ch->process.offset = split_offset; 2307 raid_bdev_submit_request(ch, bdev_io); 2308 CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset); 2309 CU_ASSERT(raid_io->offset_blocks == split_offset); 2310 CU_ASSERT(raid_io->iovcnt == 2); 2311 CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[2]); 2312 CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]); 2313 CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[2].iov_base + g_block_len); 2314 CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[2].iov_len - g_block_len); 2315 CU_ASSERT(raid_io->iovs[1].iov_base == iovs_orig[3].iov_base); 2316 CU_ASSERT(raid_io->iovs[1].iov_len == iovs_orig[3].iov_len); 2317 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len); 2318 complete_deferred_ios(); 2319 CU_ASSERT(raid_io->num_blocks == split_offset); 2320 CU_ASSERT(raid_io->offset_blocks == 0); 2321 CU_ASSERT(raid_io->iovcnt == 3); 2322 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2323 CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 2) == 0); 2324 CU_ASSERT(raid_io->iovs[2].iov_base == iovs_orig[2].iov_base); 2325 CU_ASSERT(raid_io->iovs[2].iov_len == g_block_len); 2326 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2327 complete_deferred_ios(); 2328 CU_ASSERT(raid_io->num_blocks == g_strip_size); 2329 CU_ASSERT(raid_io->offset_blocks == 0); 2330 CU_ASSERT(raid_io->iovcnt == 4); 2331 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2332 CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0); 2333 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2334 2335 CU_ASSERT(g_io_comp_status == g_child_io_status_flag); 2336 CU_ASSERT(g_io_output_index == 2); 2337 CU_ASSERT(g_io_output[0].offset_blocks == split_offset); 2338 CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset); 2339 CU_ASSERT(g_io_output[1].offset_blocks == 0); 2340 CU_ASSERT(g_io_output[1].num_blocks == split_offset); 2341 2342 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 2343 g_io_output_index = 0; 2344 2345 split_offset = g_strip_size - 1; /* split at the last iovec */ 2346 raid_ch->process.offset = split_offset; 2347 raid_bdev_submit_request(ch, bdev_io); 2348 CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset); 2349 CU_ASSERT(raid_io->offset_blocks == split_offset); 2350 CU_ASSERT(raid_io->iovcnt == 1); 2351 CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[3]); 2352 CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[3]); 2353 CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[3].iov_base + iovs_orig[3].iov_len - g_block_len); 2354 CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len); 2355 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len); 2356 complete_deferred_ios(); 2357 CU_ASSERT(raid_io->num_blocks == split_offset); 2358 CU_ASSERT(raid_io->offset_blocks == 0); 2359 CU_ASSERT(raid_io->iovcnt == 4); 2360 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2361 CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 3) == 0); 2362 CU_ASSERT(raid_io->iovs[3].iov_base == iovs_orig[3].iov_base); 2363 CU_ASSERT(raid_io->iovs[3].iov_len == iovs_orig[3].iov_len - g_block_len); 2364 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2365 complete_deferred_ios(); 2366 CU_ASSERT(raid_io->num_blocks == g_strip_size); 2367 CU_ASSERT(raid_io->offset_blocks == 0); 2368 CU_ASSERT(raid_io->iovcnt == 4); 2369 CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs); 2370 CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0); 2371 CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf); 2372 2373 CU_ASSERT(g_io_comp_status == g_child_io_status_flag); 2374 CU_ASSERT(g_io_output_index == 2); 2375 CU_ASSERT(g_io_output[0].offset_blocks == split_offset); 2376 CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset); 2377 CU_ASSERT(g_io_output[1].offset_blocks == 0); 2378 CU_ASSERT(g_io_output[1].num_blocks == split_offset); 2379 bdev_io_cleanup(bdev_io); 2380 2381 spdk_put_io_channel(ch); 2382 free_test_req(&req); 2383 pbdev->process = NULL; 2384 2385 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 2386 rpc_bdev_raid_delete(NULL, NULL); 2387 CU_ASSERT(g_rpc_err == 0); 2388 verify_raid_bdev_present("raid1", false); 2389 2390 raid_bdev_exit(); 2391 base_bdevs_cleanup(); 2392 reset_globals(); 2393 } 2394 2395 static int 2396 test_bdev_ioch_create(void *io_device, void *ctx_buf) 2397 { 2398 return 0; 2399 } 2400 2401 static void 2402 test_bdev_ioch_destroy(void *io_device, void *ctx_buf) 2403 { 2404 } 2405 2406 int 2407 main(int argc, char **argv) 2408 { 2409 CU_pSuite suite = NULL; 2410 unsigned int num_failures; 2411 2412 CU_initialize_registry(); 2413 2414 suite = CU_add_suite("raid", NULL, NULL); 2415 2416 CU_ADD_TEST(suite, test_create_raid); 2417 CU_ADD_TEST(suite, test_create_raid_superblock); 2418 CU_ADD_TEST(suite, test_delete_raid); 2419 CU_ADD_TEST(suite, test_create_raid_invalid_args); 2420 CU_ADD_TEST(suite, test_delete_raid_invalid_args); 2421 CU_ADD_TEST(suite, test_io_channel); 2422 CU_ADD_TEST(suite, test_reset_io); 2423 CU_ADD_TEST(suite, test_write_io); 2424 CU_ADD_TEST(suite, test_read_io); 2425 CU_ADD_TEST(suite, test_unmap_io); 2426 CU_ADD_TEST(suite, test_io_failure); 2427 CU_ADD_TEST(suite, test_multi_raid_no_io); 2428 CU_ADD_TEST(suite, test_multi_raid_with_io); 2429 CU_ADD_TEST(suite, test_io_type_supported); 2430 CU_ADD_TEST(suite, test_raid_json_dump_info); 2431 CU_ADD_TEST(suite, test_context_size); 2432 CU_ADD_TEST(suite, test_raid_level_conversions); 2433 CU_ADD_TEST(suite, test_raid_process); 2434 CU_ADD_TEST(suite, test_raid_io_split); 2435 2436 allocate_threads(1); 2437 set_thread(0); 2438 spdk_io_device_register(&g_bdev_ch_io_device, test_bdev_ioch_create, test_bdev_ioch_destroy, 0, 2439 NULL); 2440 2441 set_test_opts(); 2442 num_failures = spdk_ut_run_tests(argc, argv, NULL); 2443 CU_cleanup_registry(); 2444 2445 spdk_io_device_unregister(&g_bdev_ch_io_device, NULL); 2446 free_threads(); 2447 2448 return num_failures; 2449 } 2450