1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2018 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/env.h" 10 #include "spdk_internal/mock.h" 11 #include "thread/thread_internal.h" 12 #include "bdev/raid/bdev_raid.c" 13 #include "bdev/raid/bdev_raid_rpc.c" 14 #include "bdev/raid/raid0.c" 15 #include "common/lib/ut_multithread.c" 16 17 #define MAX_BASE_DRIVES 32 18 #define MAX_RAIDS 2 19 #define INVALID_IO_SUBMIT 0xFFFF 20 #define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5)) 21 #define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul) 22 23 struct spdk_bdev_channel { 24 struct spdk_io_channel *channel; 25 }; 26 27 struct spdk_bdev_desc { 28 struct spdk_bdev *bdev; 29 }; 30 31 /* Data structure to capture the output of IO for verification */ 32 struct io_output { 33 struct spdk_bdev_desc *desc; 34 struct spdk_io_channel *ch; 35 uint64_t offset_blocks; 36 uint64_t num_blocks; 37 spdk_bdev_io_completion_cb cb; 38 void *cb_arg; 39 enum spdk_bdev_io_type iotype; 40 }; 41 42 struct raid_io_ranges { 43 uint64_t lba; 44 uint64_t nblocks; 45 }; 46 47 /* Globals */ 48 int g_bdev_io_submit_status; 49 struct io_output *g_io_output = NULL; 50 uint32_t g_io_output_index; 51 uint32_t g_io_comp_status; 52 bool g_child_io_status_flag; 53 void *g_rpc_req; 54 uint32_t g_rpc_req_size; 55 TAILQ_HEAD(bdev, spdk_bdev); 56 struct bdev g_bdev_list; 57 TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry); 58 struct waitq g_io_waitq; 59 uint32_t g_block_len; 60 uint32_t g_strip_size; 61 uint32_t g_max_io_size; 62 uint8_t g_max_base_drives; 63 uint8_t g_max_raids; 64 uint8_t g_ignore_io_output; 65 uint8_t g_rpc_err; 66 char *g_get_raids_output[MAX_RAIDS]; 67 uint32_t g_get_raids_count; 68 uint8_t g_json_decode_obj_err; 69 uint8_t g_json_decode_obj_create; 70 uint8_t g_config_level_create = 0; 71 uint8_t g_test_multi_raids; 72 struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE]; 73 uint32_t g_io_range_idx; 74 uint64_t g_lba_offset; 75 struct spdk_io_channel g_io_channel; 76 77 DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module)); 78 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 79 DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0); 80 DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev, 81 enum spdk_bdev_io_type io_type), true); 82 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 83 DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 84 uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb, 85 void *cb_arg), 0); 86 DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp), 87 NULL); 88 DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func, 89 uint32_t state_mask)); 90 DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias)); 91 DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request, 92 struct spdk_json_write_ctx *w)); 93 DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request, 94 bool value)); 95 DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0); 96 DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0); 97 DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0); 98 DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values, 99 spdk_json_decode_fn decode_func, 100 void *out, size_t max_size, size_t *out_size, size_t stride), 0); 101 DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0); 102 DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0); 103 DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0); 104 DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w, 105 const char *name), 0); 106 DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0); 107 DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0); 108 DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0); 109 DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0); 110 DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w, 111 const char *name), 0); 112 DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0); 113 DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0); 114 DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL); 115 DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch, 116 struct spdk_bdev_io_wait_entry *entry), 0); 117 DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev, 118 struct spdk_memory_domain **domains, int array_size), 0); 119 DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev"); 120 DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0); 121 DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false); 122 DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type, (const struct spdk_bdev *bdev), 123 SPDK_DIF_DISABLE); 124 DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false); 125 DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0); 126 127 struct spdk_io_channel * 128 spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc) 129 { 130 g_io_channel.thread = spdk_get_thread(); 131 132 return &g_io_channel; 133 } 134 135 static void 136 set_test_opts(void) 137 { 138 139 g_max_base_drives = MAX_BASE_DRIVES; 140 g_max_raids = MAX_RAIDS; 141 g_block_len = 4096; 142 g_strip_size = 64; 143 g_max_io_size = 1024; 144 145 printf("Test Options\n"); 146 printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, " 147 "g_max_raids = %u\n", 148 g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids); 149 } 150 151 /* Set globals before every test run */ 152 static void 153 set_globals(void) 154 { 155 uint32_t max_splits; 156 157 g_bdev_io_submit_status = 0; 158 if (g_max_io_size < g_strip_size) { 159 max_splits = 2; 160 } else { 161 max_splits = (g_max_io_size / g_strip_size) + 1; 162 } 163 if (max_splits < g_max_base_drives) { 164 max_splits = g_max_base_drives; 165 } 166 167 g_io_output = calloc(max_splits, sizeof(struct io_output)); 168 SPDK_CU_ASSERT_FATAL(g_io_output != NULL); 169 g_io_output_index = 0; 170 memset(g_get_raids_output, 0, sizeof(g_get_raids_output)); 171 g_get_raids_count = 0; 172 g_io_comp_status = 0; 173 g_ignore_io_output = 0; 174 g_config_level_create = 0; 175 g_rpc_err = 0; 176 g_test_multi_raids = 0; 177 g_child_io_status_flag = true; 178 TAILQ_INIT(&g_bdev_list); 179 TAILQ_INIT(&g_io_waitq); 180 g_rpc_req = NULL; 181 g_rpc_req_size = 0; 182 g_json_decode_obj_err = 0; 183 g_json_decode_obj_create = 0; 184 g_lba_offset = 0; 185 } 186 187 static void 188 base_bdevs_cleanup(void) 189 { 190 struct spdk_bdev *bdev; 191 struct spdk_bdev *bdev_next; 192 193 if (!TAILQ_EMPTY(&g_bdev_list)) { 194 TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) { 195 free(bdev->name); 196 TAILQ_REMOVE(&g_bdev_list, bdev, internal.link); 197 free(bdev); 198 } 199 } 200 } 201 202 static void 203 check_and_remove_raid_bdev(struct raid_bdev *raid_bdev) 204 { 205 struct raid_base_bdev_info *base_info; 206 207 assert(raid_bdev != NULL); 208 assert(raid_bdev->base_bdev_info != NULL); 209 210 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) { 211 if (base_info->desc) { 212 raid_bdev_free_base_bdev_resource(base_info); 213 } 214 } 215 assert(raid_bdev->num_base_bdevs_discovered == 0); 216 raid_bdev_cleanup_and_free(raid_bdev); 217 } 218 219 /* Reset globals */ 220 static void 221 reset_globals(void) 222 { 223 if (g_io_output) { 224 free(g_io_output); 225 g_io_output = NULL; 226 } 227 g_rpc_req = NULL; 228 g_rpc_req_size = 0; 229 } 230 231 void 232 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, 233 uint64_t len) 234 { 235 cb(bdev_io->internal.ch->channel, bdev_io, true); 236 } 237 238 /* Store the IO completion status in global variable to verify by various tests */ 239 void 240 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 241 { 242 g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false); 243 } 244 245 static void 246 set_io_output(struct io_output *output, 247 struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 248 uint64_t offset_blocks, uint64_t num_blocks, 249 spdk_bdev_io_completion_cb cb, void *cb_arg, 250 enum spdk_bdev_io_type iotype) 251 { 252 output->desc = desc; 253 output->ch = ch; 254 output->offset_blocks = offset_blocks; 255 output->num_blocks = num_blocks; 256 output->cb = cb; 257 output->cb_arg = cb_arg; 258 output->iotype = iotype; 259 } 260 261 /* It will cache the split IOs for verification */ 262 int 263 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 264 struct iovec *iov, int iovcnt, 265 uint64_t offset_blocks, uint64_t num_blocks, 266 spdk_bdev_io_completion_cb cb, void *cb_arg) 267 { 268 struct io_output *output = &g_io_output[g_io_output_index]; 269 struct spdk_bdev_io *child_io; 270 271 if (g_ignore_io_output) { 272 return 0; 273 } 274 275 if (g_max_io_size < g_strip_size) { 276 SPDK_CU_ASSERT_FATAL(g_io_output_index < 2); 277 } else { 278 SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1); 279 } 280 if (g_bdev_io_submit_status == 0) { 281 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 282 SPDK_BDEV_IO_TYPE_WRITE); 283 g_io_output_index++; 284 285 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 286 SPDK_CU_ASSERT_FATAL(child_io != NULL); 287 cb(child_io, g_child_io_status_flag, cb_arg); 288 } 289 290 return g_bdev_io_submit_status; 291 } 292 293 int 294 spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 295 struct iovec *iov, int iovcnt, 296 uint64_t offset_blocks, uint64_t num_blocks, 297 spdk_bdev_io_completion_cb cb, void *cb_arg, 298 struct spdk_bdev_ext_io_opts *opts) 299 { 300 return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg); 301 } 302 303 int 304 spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 305 struct iovec *iov, int iovcnt, void *md, 306 uint64_t offset_blocks, uint64_t num_blocks, 307 spdk_bdev_io_completion_cb cb, void *cb_arg) 308 { 309 return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg); 310 } 311 312 int 313 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 314 spdk_bdev_io_completion_cb cb, void *cb_arg) 315 { 316 struct io_output *output = &g_io_output[g_io_output_index]; 317 struct spdk_bdev_io *child_io; 318 319 if (g_ignore_io_output) { 320 return 0; 321 } 322 323 if (g_bdev_io_submit_status == 0) { 324 set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET); 325 g_io_output_index++; 326 327 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 328 SPDK_CU_ASSERT_FATAL(child_io != NULL); 329 cb(child_io, g_child_io_status_flag, cb_arg); 330 } 331 332 return g_bdev_io_submit_status; 333 } 334 335 int 336 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 337 uint64_t offset_blocks, uint64_t num_blocks, 338 spdk_bdev_io_completion_cb cb, void *cb_arg) 339 { 340 struct io_output *output = &g_io_output[g_io_output_index]; 341 struct spdk_bdev_io *child_io; 342 343 if (g_ignore_io_output) { 344 return 0; 345 } 346 347 if (g_bdev_io_submit_status == 0) { 348 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 349 SPDK_BDEV_IO_TYPE_UNMAP); 350 g_io_output_index++; 351 352 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 353 SPDK_CU_ASSERT_FATAL(child_io != NULL); 354 cb(child_io, g_child_io_status_flag, cb_arg); 355 } 356 357 return g_bdev_io_submit_status; 358 } 359 360 void 361 spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno) 362 { 363 CU_ASSERT(bdeverrno == 0); 364 SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL); 365 bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno); 366 } 367 368 void 369 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 370 { 371 int ret; 372 373 bdev->internal.unregister_cb = cb_fn; 374 bdev->internal.unregister_ctx = cb_arg; 375 376 ret = bdev->fn_table->destruct(bdev->ctxt); 377 CU_ASSERT(ret == 1); 378 379 poll_threads(); 380 } 381 382 int 383 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb, 384 void *event_ctx, struct spdk_bdev_desc **_desc) 385 { 386 struct spdk_bdev *bdev; 387 388 bdev = spdk_bdev_get_by_name(bdev_name); 389 if (bdev == NULL) { 390 return -ENODEV; 391 } 392 393 *_desc = (void *)bdev; 394 return 0; 395 } 396 397 struct spdk_bdev * 398 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 399 { 400 return (void *)desc; 401 } 402 403 char * 404 spdk_sprintf_alloc(const char *format, ...) 405 { 406 return strdup(format); 407 } 408 409 int 410 spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val) 411 { 412 if (!g_test_multi_raids) { 413 struct rpc_bdev_raid_create *req = g_rpc_req; 414 if (strcmp(name, "strip_size_kb") == 0) { 415 CU_ASSERT(req->strip_size_kb == val); 416 } else if (strcmp(name, "blocklen_shift") == 0) { 417 CU_ASSERT(spdk_u32log2(g_block_len) == val); 418 } else if (strcmp(name, "num_base_bdevs") == 0) { 419 CU_ASSERT(req->base_bdevs.num_base_bdevs == val); 420 } else if (strcmp(name, "state") == 0) { 421 CU_ASSERT(val == RAID_BDEV_STATE_ONLINE); 422 } else if (strcmp(name, "destruct_called") == 0) { 423 CU_ASSERT(val == 0); 424 } else if (strcmp(name, "num_base_bdevs_discovered") == 0) { 425 CU_ASSERT(req->base_bdevs.num_base_bdevs == val); 426 } 427 } 428 return 0; 429 } 430 431 int 432 spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val) 433 { 434 if (g_test_multi_raids) { 435 if (strcmp(name, "name") == 0) { 436 g_get_raids_output[g_get_raids_count] = strdup(val); 437 SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL); 438 g_get_raids_count++; 439 } 440 } else { 441 struct rpc_bdev_raid_create *req = g_rpc_req; 442 if (strcmp(name, "raid_level") == 0) { 443 CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0); 444 } 445 } 446 return 0; 447 } 448 449 int 450 spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val) 451 { 452 if (!g_test_multi_raids) { 453 struct rpc_bdev_raid_create *req = g_rpc_req; 454 if (strcmp(name, "superblock") == 0) { 455 CU_ASSERT(val == req->superblock_enabled); 456 } 457 } 458 return 0; 459 } 460 461 void 462 spdk_bdev_free_io(struct spdk_bdev_io *bdev_io) 463 { 464 if (bdev_io) { 465 free(bdev_io); 466 } 467 } 468 469 /* It will cache split IOs for verification */ 470 int 471 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 472 struct iovec *iov, int iovcnt, 473 uint64_t offset_blocks, uint64_t num_blocks, 474 spdk_bdev_io_completion_cb cb, void *cb_arg) 475 { 476 struct io_output *output = &g_io_output[g_io_output_index]; 477 struct spdk_bdev_io *child_io; 478 479 if (g_ignore_io_output) { 480 return 0; 481 } 482 483 SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1); 484 if (g_bdev_io_submit_status == 0) { 485 set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg, 486 SPDK_BDEV_IO_TYPE_READ); 487 g_io_output_index++; 488 489 child_io = calloc(1, sizeof(struct spdk_bdev_io)); 490 SPDK_CU_ASSERT_FATAL(child_io != NULL); 491 cb(child_io, g_child_io_status_flag, cb_arg); 492 } 493 494 return g_bdev_io_submit_status; 495 } 496 497 int 498 spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 499 struct iovec *iov, int iovcnt, 500 uint64_t offset_blocks, uint64_t num_blocks, 501 spdk_bdev_io_completion_cb cb, void *cb_arg, 502 struct spdk_bdev_ext_io_opts *opts) 503 { 504 return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg); 505 } 506 507 int 508 spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, 509 struct iovec *iov, int iovcnt, void *md, 510 uint64_t offset_blocks, uint64_t num_blocks, 511 spdk_bdev_io_completion_cb cb, void *cb_arg) 512 { 513 return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg); 514 } 515 516 517 void 518 spdk_bdev_module_release_bdev(struct spdk_bdev *bdev) 519 { 520 CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE); 521 CU_ASSERT(bdev->internal.claim.v1.module != NULL); 522 bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE; 523 bdev->internal.claim.v1.module = NULL; 524 } 525 526 int 527 spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, 528 struct spdk_bdev_module *module) 529 { 530 if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) { 531 CU_ASSERT(bdev->internal.claim.v1.module != NULL); 532 return -1; 533 } 534 CU_ASSERT(bdev->internal.claim.v1.module == NULL); 535 bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE; 536 bdev->internal.claim.v1.module = module; 537 return 0; 538 } 539 540 int 541 spdk_json_decode_object(const struct spdk_json_val *values, 542 const struct spdk_json_object_decoder *decoders, size_t num_decoders, 543 void *out) 544 { 545 struct rpc_bdev_raid_create *req, *_out; 546 size_t i; 547 548 if (g_json_decode_obj_err) { 549 return -1; 550 } else if (g_json_decode_obj_create) { 551 req = g_rpc_req; 552 _out = out; 553 554 _out->name = strdup(req->name); 555 SPDK_CU_ASSERT_FATAL(_out->name != NULL); 556 _out->strip_size_kb = req->strip_size_kb; 557 _out->level = req->level; 558 _out->superblock_enabled = req->superblock_enabled; 559 _out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs; 560 for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) { 561 _out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]); 562 SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]); 563 } 564 } else { 565 memcpy(out, g_rpc_req, g_rpc_req_size); 566 } 567 568 return 0; 569 } 570 571 struct spdk_json_write_ctx * 572 spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request) 573 { 574 return (void *)1; 575 } 576 577 void 578 spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request, 579 int error_code, const char *msg) 580 { 581 g_rpc_err = 1; 582 } 583 584 void 585 spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request, 586 int error_code, const char *fmt, ...) 587 { 588 g_rpc_err = 1; 589 } 590 591 struct spdk_bdev * 592 spdk_bdev_get_by_name(const char *bdev_name) 593 { 594 struct spdk_bdev *bdev; 595 596 if (!TAILQ_EMPTY(&g_bdev_list)) { 597 TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) { 598 if (strcmp(bdev_name, bdev->name) == 0) { 599 return bdev; 600 } 601 } 602 } 603 604 return NULL; 605 } 606 607 int 608 spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 609 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 610 { 611 if (cb_fn) { 612 cb_fn(cb_arg, 0); 613 } 614 615 return 0; 616 } 617 618 int 619 spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module, 620 spdk_bdev_quiesce_cb cb_fn, void *cb_arg) 621 { 622 if (cb_fn) { 623 cb_fn(cb_arg, 0); 624 } 625 626 return 0; 627 } 628 629 static void 630 bdev_io_cleanup(struct spdk_bdev_io *bdev_io) 631 { 632 if (bdev_io->u.bdev.iovs) { 633 if (bdev_io->u.bdev.iovs->iov_base) { 634 free(bdev_io->u.bdev.iovs->iov_base); 635 } 636 free(bdev_io->u.bdev.iovs); 637 } 638 free(bdev_io); 639 } 640 641 static void 642 bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev, 643 uint64_t lba, uint64_t blocks, int16_t iotype) 644 { 645 struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch); 646 647 bdev_io->bdev = bdev; 648 bdev_io->u.bdev.offset_blocks = lba; 649 bdev_io->u.bdev.num_blocks = blocks; 650 bdev_io->type = iotype; 651 652 if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) { 653 return; 654 } 655 656 bdev_io->u.bdev.iovcnt = 1; 657 bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec)); 658 SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL); 659 bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * g_block_len); 660 SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL); 661 bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * g_block_len; 662 bdev_io->internal.ch = channel; 663 } 664 665 static void 666 verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 667 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status) 668 { 669 uint8_t index = 0; 670 struct io_output *output; 671 672 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 673 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 674 SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT); 675 SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL); 676 677 CU_ASSERT(g_io_output_index == num_base_drives); 678 for (index = 0; index < g_io_output_index; index++) { 679 output = &g_io_output[index]; 680 CU_ASSERT(ch_ctx->base_channel[index] == output->ch); 681 CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc); 682 CU_ASSERT(bdev_io->type == output->iotype); 683 } 684 CU_ASSERT(g_io_comp_status == io_status); 685 } 686 687 static void 688 verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 689 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status) 690 { 691 uint32_t strip_shift = spdk_u32log2(g_strip_size); 692 uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift; 693 uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >> 694 strip_shift; 695 uint32_t splits_reqd = (end_strip - start_strip + 1); 696 uint32_t strip; 697 uint64_t pd_strip; 698 uint8_t pd_idx; 699 uint32_t offset_in_strip; 700 uint64_t pd_lba; 701 uint64_t pd_blocks; 702 uint32_t index = 0; 703 struct io_output *output; 704 705 if (io_status == INVALID_IO_SUBMIT) { 706 CU_ASSERT(g_io_comp_status == false); 707 return; 708 } 709 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 710 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 711 712 CU_ASSERT(splits_reqd == g_io_output_index); 713 for (strip = start_strip; strip <= end_strip; strip++, index++) { 714 pd_strip = strip / num_base_drives; 715 pd_idx = strip % num_base_drives; 716 if (strip == start_strip) { 717 offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1); 718 pd_lba = (pd_strip << strip_shift) + offset_in_strip; 719 if (strip == end_strip) { 720 pd_blocks = bdev_io->u.bdev.num_blocks; 721 } else { 722 pd_blocks = g_strip_size - offset_in_strip; 723 } 724 } else if (strip == end_strip) { 725 pd_lba = pd_strip << strip_shift; 726 pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) & 727 (g_strip_size - 1)) + 1; 728 } else { 729 pd_lba = pd_strip << raid_bdev->strip_size_shift; 730 pd_blocks = raid_bdev->strip_size; 731 } 732 output = &g_io_output[index]; 733 CU_ASSERT(pd_lba == output->offset_blocks); 734 CU_ASSERT(pd_blocks == output->num_blocks); 735 CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch); 736 CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc); 737 CU_ASSERT(bdev_io->type == output->iotype); 738 } 739 CU_ASSERT(g_io_comp_status == io_status); 740 } 741 742 static void 743 verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives, 744 struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, 745 uint32_t io_status) 746 { 747 uint32_t strip_shift = spdk_u32log2(g_strip_size); 748 uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size; 749 uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) % 750 g_strip_size; 751 uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift; 752 uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >> 753 strip_shift; 754 uint8_t n_disks_involved; 755 uint64_t start_strip_disk_idx; 756 uint64_t end_strip_disk_idx; 757 uint64_t nblocks_in_start_disk; 758 uint64_t offset_in_start_disk; 759 uint8_t disk_idx; 760 uint64_t base_io_idx; 761 uint64_t sum_nblocks = 0; 762 struct io_output *output; 763 764 if (io_status == INVALID_IO_SUBMIT) { 765 CU_ASSERT(g_io_comp_status == false); 766 return; 767 } 768 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 769 SPDK_CU_ASSERT_FATAL(num_base_drives != 0); 770 SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ); 771 SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE); 772 773 n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives); 774 CU_ASSERT(n_disks_involved == g_io_output_index); 775 776 start_strip_disk_idx = start_strip % num_base_drives; 777 end_strip_disk_idx = end_strip % num_base_drives; 778 779 offset_in_start_disk = g_io_output[0].offset_blocks; 780 nblocks_in_start_disk = g_io_output[0].num_blocks; 781 782 for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved; 783 base_io_idx++, disk_idx++) { 784 uint64_t start_offset_in_disk; 785 uint64_t end_offset_in_disk; 786 787 output = &g_io_output[base_io_idx]; 788 789 /* round disk_idx */ 790 if (disk_idx >= num_base_drives) { 791 disk_idx %= num_base_drives; 792 } 793 794 /* start_offset_in_disk aligned in strip check: 795 * The first base io has a same start_offset_in_strip with the whole raid io. 796 * Other base io should have aligned start_offset_in_strip which is 0. 797 */ 798 start_offset_in_disk = output->offset_blocks; 799 if (base_io_idx == 0) { 800 CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip); 801 } else { 802 CU_ASSERT(start_offset_in_disk % g_strip_size == 0); 803 } 804 805 /* end_offset_in_disk aligned in strip check: 806 * Base io on disk at which end_strip is located, has a same end_offset_in_strip 807 * with the whole raid io. 808 * Other base io should have aligned end_offset_in_strip. 809 */ 810 end_offset_in_disk = output->offset_blocks + output->num_blocks - 1; 811 if (disk_idx == end_strip_disk_idx) { 812 CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip); 813 } else { 814 CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1); 815 } 816 817 /* start_offset_in_disk compared with start_disk. 818 * 1. For disk_idx which is larger than start_strip_disk_idx: Its start_offset_in_disk 819 * mustn't be larger than the start offset of start_offset_in_disk; And the gap 820 * must be less than strip size. 821 * 2. For disk_idx which is less than start_strip_disk_idx, Its start_offset_in_disk 822 * must be larger than the start offset of start_offset_in_disk; And the gap mustn't 823 * be less than strip size. 824 */ 825 if (disk_idx > start_strip_disk_idx) { 826 CU_ASSERT(start_offset_in_disk <= offset_in_start_disk); 827 CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size); 828 } else if (disk_idx < start_strip_disk_idx) { 829 CU_ASSERT(start_offset_in_disk > offset_in_start_disk); 830 CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size); 831 } 832 833 /* nblocks compared with start_disk: 834 * The gap between them must be within a strip size. 835 */ 836 if (output->num_blocks <= nblocks_in_start_disk) { 837 CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size); 838 } else { 839 CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size); 840 } 841 842 sum_nblocks += output->num_blocks; 843 844 CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch); 845 CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc); 846 CU_ASSERT(bdev_io->type == output->iotype); 847 } 848 849 /* Sum of each nblocks should be same with raid bdev_io */ 850 CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks); 851 852 CU_ASSERT(g_io_comp_status == io_status); 853 } 854 855 static void 856 verify_raid_bdev_present(const char *name, bool presence) 857 { 858 struct raid_bdev *pbdev; 859 bool pbdev_found; 860 861 pbdev_found = false; 862 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 863 if (strcmp(pbdev->bdev.name, name) == 0) { 864 pbdev_found = true; 865 break; 866 } 867 } 868 if (presence == true) { 869 CU_ASSERT(pbdev_found == true); 870 } else { 871 CU_ASSERT(pbdev_found == false); 872 } 873 } 874 875 static void 876 verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state) 877 { 878 struct raid_bdev *pbdev; 879 struct raid_base_bdev_info *base_info; 880 struct spdk_bdev *bdev = NULL; 881 bool pbdev_found; 882 uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF; 883 884 pbdev_found = false; 885 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 886 if (strcmp(pbdev->bdev.name, r->name) == 0) { 887 pbdev_found = true; 888 if (presence == false) { 889 break; 890 } 891 CU_ASSERT(pbdev->base_bdev_info != NULL); 892 CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len)); 893 CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) / 894 g_block_len))); 895 CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len)); 896 CU_ASSERT((uint32_t)pbdev->state == raid_state); 897 CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs); 898 CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs); 899 CU_ASSERT(pbdev->level == r->level); 900 CU_ASSERT(pbdev->base_bdev_info != NULL); 901 RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) { 902 CU_ASSERT(base_info->desc != NULL); 903 bdev = spdk_bdev_desc_get_bdev(base_info->desc); 904 CU_ASSERT(bdev != NULL); 905 CU_ASSERT(base_info->remove_scheduled == false); 906 CU_ASSERT((pbdev->superblock_enabled == true && base_info->data_offset != 0) || 907 (pbdev->superblock_enabled == false && base_info->data_offset == 0)); 908 CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt); 909 910 if (bdev && base_info->data_size < min_blockcnt) { 911 min_blockcnt = base_info->data_size; 912 } 913 } 914 CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) * 915 (r->strip_size_kb * 1024 / g_block_len)) * 916 r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt); 917 CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0); 918 CU_ASSERT(pbdev->bdev.write_cache == 0); 919 CU_ASSERT(pbdev->bdev.blocklen == g_block_len); 920 if (pbdev->num_base_bdevs > 1) { 921 CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size); 922 CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true); 923 } else { 924 CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0); 925 CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false); 926 } 927 CU_ASSERT(pbdev->bdev.ctxt == pbdev); 928 CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table); 929 CU_ASSERT(pbdev->bdev.module == &g_raid_if); 930 break; 931 } 932 } 933 if (presence == true) { 934 CU_ASSERT(pbdev_found == true); 935 } else { 936 CU_ASSERT(pbdev_found == false); 937 } 938 } 939 940 static void 941 verify_get_raids(struct rpc_bdev_raid_create *construct_req, 942 uint8_t g_max_raids, 943 char **g_get_raids_output, uint32_t g_get_raids_count) 944 { 945 uint8_t i, j; 946 bool found; 947 948 CU_ASSERT(g_max_raids == g_get_raids_count); 949 if (g_max_raids == g_get_raids_count) { 950 for (i = 0; i < g_max_raids; i++) { 951 found = false; 952 for (j = 0; j < g_max_raids; j++) { 953 if (construct_req[i].name && 954 strcmp(construct_req[i].name, g_get_raids_output[i]) == 0) { 955 found = true; 956 break; 957 } 958 } 959 CU_ASSERT(found == true); 960 } 961 } 962 } 963 964 static void 965 create_base_bdevs(uint32_t bbdev_start_idx) 966 { 967 uint8_t i; 968 struct spdk_bdev *base_bdev; 969 char name[16]; 970 971 for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) { 972 snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1"); 973 base_bdev = calloc(1, sizeof(struct spdk_bdev)); 974 SPDK_CU_ASSERT_FATAL(base_bdev != NULL); 975 base_bdev->name = strdup(name); 976 SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL); 977 base_bdev->blocklen = g_block_len; 978 base_bdev->blockcnt = BLOCK_CNT; 979 TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link); 980 } 981 } 982 983 static void 984 create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name, 985 uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled) 986 { 987 uint8_t i; 988 char name[16]; 989 uint8_t bbdev_idx = bbdev_start_idx; 990 991 r->name = strdup(raid_name); 992 SPDK_CU_ASSERT_FATAL(r->name != NULL); 993 r->strip_size_kb = (g_strip_size * g_block_len) / 1024; 994 r->level = RAID0; 995 r->superblock_enabled = superblock_enabled; 996 r->base_bdevs.num_base_bdevs = g_max_base_drives; 997 for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) { 998 snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1"); 999 r->base_bdevs.base_bdevs[i] = strdup(name); 1000 SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL); 1001 } 1002 if (create_base_bdev == true) { 1003 create_base_bdevs(bbdev_start_idx); 1004 } 1005 g_rpc_req = r; 1006 g_rpc_req_size = sizeof(*r); 1007 } 1008 1009 static void 1010 create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name, 1011 uint8_t bbdev_start_idx, bool create_base_bdev, 1012 uint8_t json_decode_obj_err, bool superblock) 1013 { 1014 create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock); 1015 1016 g_rpc_err = 0; 1017 g_json_decode_obj_create = 1; 1018 g_json_decode_obj_err = json_decode_obj_err; 1019 g_config_level_create = 0; 1020 g_test_multi_raids = 0; 1021 } 1022 1023 static void 1024 free_test_req(struct rpc_bdev_raid_create *r) 1025 { 1026 uint8_t i; 1027 1028 free(r->name); 1029 for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) { 1030 free(r->base_bdevs.base_bdevs[i]); 1031 } 1032 } 1033 1034 static void 1035 create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name, 1036 uint8_t json_decode_obj_err) 1037 { 1038 r->name = strdup(raid_name); 1039 SPDK_CU_ASSERT_FATAL(r->name != NULL); 1040 1041 g_rpc_req = r; 1042 g_rpc_req_size = sizeof(*r); 1043 g_rpc_err = 0; 1044 g_json_decode_obj_create = 0; 1045 g_json_decode_obj_err = json_decode_obj_err; 1046 g_config_level_create = 0; 1047 g_test_multi_raids = 0; 1048 } 1049 1050 static void 1051 create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category, 1052 uint8_t json_decode_obj_err) 1053 { 1054 r->category = strdup(category); 1055 SPDK_CU_ASSERT_FATAL(r->category != NULL); 1056 1057 g_rpc_req = r; 1058 g_rpc_req_size = sizeof(*r); 1059 g_rpc_err = 0; 1060 g_json_decode_obj_create = 0; 1061 g_json_decode_obj_err = json_decode_obj_err; 1062 g_config_level_create = 0; 1063 g_test_multi_raids = 1; 1064 g_get_raids_count = 0; 1065 } 1066 1067 static void 1068 test_create_raid(void) 1069 { 1070 struct rpc_bdev_raid_create req; 1071 struct rpc_bdev_raid_delete delete_req; 1072 1073 set_globals(); 1074 CU_ASSERT(raid_bdev_init() == 0); 1075 1076 verify_raid_bdev_present("raid1", false); 1077 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1078 rpc_bdev_raid_create(NULL, NULL); 1079 CU_ASSERT(g_rpc_err == 0); 1080 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1081 free_test_req(&req); 1082 1083 create_raid_bdev_delete_req(&delete_req, "raid1", 0); 1084 rpc_bdev_raid_delete(NULL, NULL); 1085 CU_ASSERT(g_rpc_err == 0); 1086 raid_bdev_exit(); 1087 base_bdevs_cleanup(); 1088 reset_globals(); 1089 } 1090 1091 static void 1092 test_delete_raid(void) 1093 { 1094 struct rpc_bdev_raid_create construct_req; 1095 struct rpc_bdev_raid_delete delete_req; 1096 1097 set_globals(); 1098 CU_ASSERT(raid_bdev_init() == 0); 1099 1100 verify_raid_bdev_present("raid1", false); 1101 create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false); 1102 rpc_bdev_raid_create(NULL, NULL); 1103 CU_ASSERT(g_rpc_err == 0); 1104 verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE); 1105 free_test_req(&construct_req); 1106 1107 create_raid_bdev_delete_req(&delete_req, "raid1", 0); 1108 rpc_bdev_raid_delete(NULL, NULL); 1109 CU_ASSERT(g_rpc_err == 0); 1110 verify_raid_bdev_present("raid1", false); 1111 1112 raid_bdev_exit(); 1113 base_bdevs_cleanup(); 1114 reset_globals(); 1115 } 1116 1117 static void 1118 test_create_raid_invalid_args(void) 1119 { 1120 struct rpc_bdev_raid_create req; 1121 struct rpc_bdev_raid_delete destroy_req; 1122 struct raid_bdev *raid_bdev; 1123 1124 set_globals(); 1125 CU_ASSERT(raid_bdev_init() == 0); 1126 1127 verify_raid_bdev_present("raid1", false); 1128 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1129 req.level = INVALID_RAID_LEVEL; 1130 rpc_bdev_raid_create(NULL, NULL); 1131 CU_ASSERT(g_rpc_err == 1); 1132 free_test_req(&req); 1133 verify_raid_bdev_present("raid1", false); 1134 1135 create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false); 1136 rpc_bdev_raid_create(NULL, NULL); 1137 CU_ASSERT(g_rpc_err == 1); 1138 free_test_req(&req); 1139 verify_raid_bdev_present("raid1", false); 1140 1141 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1142 req.strip_size_kb = 1231; 1143 rpc_bdev_raid_create(NULL, NULL); 1144 CU_ASSERT(g_rpc_err == 1); 1145 free_test_req(&req); 1146 verify_raid_bdev_present("raid1", false); 1147 1148 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1149 rpc_bdev_raid_create(NULL, NULL); 1150 CU_ASSERT(g_rpc_err == 0); 1151 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1152 free_test_req(&req); 1153 1154 create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false); 1155 rpc_bdev_raid_create(NULL, NULL); 1156 CU_ASSERT(g_rpc_err == 1); 1157 free_test_req(&req); 1158 1159 create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false); 1160 rpc_bdev_raid_create(NULL, NULL); 1161 CU_ASSERT(g_rpc_err == 1); 1162 free_test_req(&req); 1163 verify_raid_bdev_present("raid2", false); 1164 1165 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false); 1166 free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]); 1167 req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1"); 1168 SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL); 1169 rpc_bdev_raid_create(NULL, NULL); 1170 CU_ASSERT(g_rpc_err == 1); 1171 free_test_req(&req); 1172 verify_raid_bdev_present("raid2", false); 1173 1174 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false); 1175 free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]); 1176 req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1"); 1177 SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL); 1178 rpc_bdev_raid_create(NULL, NULL); 1179 CU_ASSERT(g_rpc_err == 0); 1180 free_test_req(&req); 1181 verify_raid_bdev_present("raid2", true); 1182 raid_bdev = raid_bdev_find_by_name("raid2"); 1183 SPDK_CU_ASSERT_FATAL(raid_bdev != NULL); 1184 check_and_remove_raid_bdev(raid_bdev); 1185 1186 create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false); 1187 rpc_bdev_raid_create(NULL, NULL); 1188 CU_ASSERT(g_rpc_err == 0); 1189 free_test_req(&req); 1190 verify_raid_bdev_present("raid2", true); 1191 verify_raid_bdev_present("raid1", true); 1192 1193 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1194 rpc_bdev_raid_delete(NULL, NULL); 1195 create_raid_bdev_delete_req(&destroy_req, "raid2", 0); 1196 rpc_bdev_raid_delete(NULL, NULL); 1197 raid_bdev_exit(); 1198 base_bdevs_cleanup(); 1199 reset_globals(); 1200 } 1201 1202 static void 1203 test_delete_raid_invalid_args(void) 1204 { 1205 struct rpc_bdev_raid_create construct_req; 1206 struct rpc_bdev_raid_delete destroy_req; 1207 1208 set_globals(); 1209 CU_ASSERT(raid_bdev_init() == 0); 1210 1211 verify_raid_bdev_present("raid1", false); 1212 create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false); 1213 rpc_bdev_raid_create(NULL, NULL); 1214 CU_ASSERT(g_rpc_err == 0); 1215 verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE); 1216 free_test_req(&construct_req); 1217 1218 create_raid_bdev_delete_req(&destroy_req, "raid2", 0); 1219 rpc_bdev_raid_delete(NULL, NULL); 1220 CU_ASSERT(g_rpc_err == 1); 1221 1222 create_raid_bdev_delete_req(&destroy_req, "raid1", 1); 1223 rpc_bdev_raid_delete(NULL, NULL); 1224 CU_ASSERT(g_rpc_err == 1); 1225 free(destroy_req.name); 1226 verify_raid_bdev_present("raid1", true); 1227 1228 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1229 rpc_bdev_raid_delete(NULL, NULL); 1230 CU_ASSERT(g_rpc_err == 0); 1231 verify_raid_bdev_present("raid1", false); 1232 1233 raid_bdev_exit(); 1234 base_bdevs_cleanup(); 1235 reset_globals(); 1236 } 1237 1238 static void 1239 test_io_channel(void) 1240 { 1241 struct rpc_bdev_raid_create req; 1242 struct rpc_bdev_raid_delete destroy_req; 1243 struct raid_bdev *pbdev; 1244 struct raid_bdev_io_channel *ch_ctx; 1245 uint8_t i; 1246 1247 set_globals(); 1248 CU_ASSERT(raid_bdev_init() == 0); 1249 1250 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1251 verify_raid_bdev_present("raid1", false); 1252 rpc_bdev_raid_create(NULL, NULL); 1253 CU_ASSERT(g_rpc_err == 0); 1254 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1255 1256 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1257 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1258 break; 1259 } 1260 } 1261 CU_ASSERT(pbdev != NULL); 1262 ch_ctx = calloc(1, sizeof(struct raid_bdev_io_channel)); 1263 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1264 1265 CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); 1266 for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { 1267 CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); 1268 } 1269 raid_bdev_destroy_cb(pbdev, ch_ctx); 1270 CU_ASSERT(ch_ctx->base_channel == NULL); 1271 free_test_req(&req); 1272 1273 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1274 rpc_bdev_raid_delete(NULL, NULL); 1275 CU_ASSERT(g_rpc_err == 0); 1276 verify_raid_bdev_present("raid1", false); 1277 1278 free(ch_ctx); 1279 raid_bdev_exit(); 1280 base_bdevs_cleanup(); 1281 reset_globals(); 1282 } 1283 1284 static void 1285 test_write_io(void) 1286 { 1287 struct rpc_bdev_raid_create req; 1288 struct rpc_bdev_raid_delete destroy_req; 1289 struct raid_bdev *pbdev; 1290 struct spdk_io_channel *ch; 1291 struct raid_bdev_io_channel *ch_ctx; 1292 uint8_t i; 1293 struct spdk_bdev_io *bdev_io; 1294 uint64_t io_len; 1295 uint64_t lba = 0; 1296 struct spdk_io_channel *ch_b; 1297 struct spdk_bdev_channel *ch_b_ctx; 1298 1299 set_globals(); 1300 CU_ASSERT(raid_bdev_init() == 0); 1301 1302 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1303 verify_raid_bdev_present("raid1", false); 1304 rpc_bdev_raid_create(NULL, NULL); 1305 CU_ASSERT(g_rpc_err == 0); 1306 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1307 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1308 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1309 break; 1310 } 1311 } 1312 CU_ASSERT(pbdev != NULL); 1313 ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); 1314 SPDK_CU_ASSERT_FATAL(ch != NULL); 1315 1316 ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel)); 1317 SPDK_CU_ASSERT_FATAL(ch_b != NULL); 1318 ch_b_ctx = spdk_io_channel_get_ctx(ch_b); 1319 ch_b_ctx->channel = ch; 1320 1321 ch_ctx = spdk_io_channel_get_ctx(ch); 1322 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1323 1324 CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); 1325 for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { 1326 CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); 1327 } 1328 1329 /* test 2 IO sizes based on global strip size set earlier */ 1330 for (i = 0; i < 2; i++) { 1331 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1332 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1333 io_len = (g_strip_size / 2) << i; 1334 bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE); 1335 lba += g_strip_size; 1336 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1337 g_io_output_index = 0; 1338 raid_bdev_submit_request(ch, bdev_io); 1339 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1340 g_child_io_status_flag); 1341 bdev_io_cleanup(bdev_io); 1342 } 1343 1344 free_test_req(&req); 1345 raid_bdev_destroy_cb(pbdev, ch_ctx); 1346 CU_ASSERT(ch_ctx->base_channel == NULL); 1347 free(ch); 1348 free(ch_b); 1349 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1350 rpc_bdev_raid_delete(NULL, NULL); 1351 CU_ASSERT(g_rpc_err == 0); 1352 verify_raid_bdev_present("raid1", false); 1353 1354 raid_bdev_exit(); 1355 base_bdevs_cleanup(); 1356 reset_globals(); 1357 } 1358 1359 static void 1360 test_read_io(void) 1361 { 1362 struct rpc_bdev_raid_create req; 1363 struct rpc_bdev_raid_delete destroy_req; 1364 struct raid_bdev *pbdev; 1365 struct spdk_io_channel *ch; 1366 struct raid_bdev_io_channel *ch_ctx; 1367 uint8_t i; 1368 struct spdk_bdev_io *bdev_io; 1369 uint64_t io_len; 1370 uint64_t lba; 1371 struct spdk_io_channel *ch_b; 1372 struct spdk_bdev_channel *ch_b_ctx; 1373 1374 set_globals(); 1375 CU_ASSERT(raid_bdev_init() == 0); 1376 1377 verify_raid_bdev_present("raid1", false); 1378 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1379 rpc_bdev_raid_create(NULL, NULL); 1380 CU_ASSERT(g_rpc_err == 0); 1381 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1382 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1383 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1384 break; 1385 } 1386 } 1387 CU_ASSERT(pbdev != NULL); 1388 ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); 1389 SPDK_CU_ASSERT_FATAL(ch != NULL); 1390 1391 ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel)); 1392 SPDK_CU_ASSERT_FATAL(ch_b != NULL); 1393 ch_b_ctx = spdk_io_channel_get_ctx(ch_b); 1394 ch_b_ctx->channel = ch; 1395 1396 ch_ctx = spdk_io_channel_get_ctx(ch); 1397 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1398 1399 CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); 1400 for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { 1401 CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); 1402 } 1403 free_test_req(&req); 1404 1405 /* test 2 IO sizes based on global strip size set earlier */ 1406 lba = 0; 1407 for (i = 0; i < 2; i++) { 1408 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1409 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1410 io_len = (g_strip_size / 2) << i; 1411 bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ); 1412 lba += g_strip_size; 1413 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1414 g_io_output_index = 0; 1415 raid_bdev_submit_request(ch, bdev_io); 1416 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1417 g_child_io_status_flag); 1418 bdev_io_cleanup(bdev_io); 1419 } 1420 1421 raid_bdev_destroy_cb(pbdev, ch_ctx); 1422 CU_ASSERT(ch_ctx->base_channel == NULL); 1423 free(ch); 1424 free(ch_b); 1425 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1426 rpc_bdev_raid_delete(NULL, NULL); 1427 CU_ASSERT(g_rpc_err == 0); 1428 verify_raid_bdev_present("raid1", false); 1429 1430 raid_bdev_exit(); 1431 base_bdevs_cleanup(); 1432 reset_globals(); 1433 } 1434 1435 static void 1436 raid_bdev_io_generate_by_strips(uint64_t n_strips) 1437 { 1438 uint64_t lba; 1439 uint64_t nblocks; 1440 uint64_t start_offset; 1441 uint64_t end_offset; 1442 uint64_t offsets_in_strip[3]; 1443 uint64_t start_bdev_idx; 1444 uint64_t start_bdev_offset; 1445 uint64_t start_bdev_idxs[3]; 1446 int i, j, l; 1447 1448 /* 3 different situations of offset in strip */ 1449 offsets_in_strip[0] = 0; 1450 offsets_in_strip[1] = g_strip_size >> 1; 1451 offsets_in_strip[2] = g_strip_size - 1; 1452 1453 /* 3 different situations of start_bdev_idx */ 1454 start_bdev_idxs[0] = 0; 1455 start_bdev_idxs[1] = g_max_base_drives >> 1; 1456 start_bdev_idxs[2] = g_max_base_drives - 1; 1457 1458 /* consider different offset in strip */ 1459 for (i = 0; i < 3; i++) { 1460 start_offset = offsets_in_strip[i]; 1461 for (j = 0; j < 3; j++) { 1462 end_offset = offsets_in_strip[j]; 1463 if (n_strips == 1 && start_offset > end_offset) { 1464 continue; 1465 } 1466 1467 /* consider at which base_bdev lba is started. */ 1468 for (l = 0; l < 3; l++) { 1469 start_bdev_idx = start_bdev_idxs[l]; 1470 start_bdev_offset = start_bdev_idx * g_strip_size; 1471 lba = g_lba_offset + start_bdev_offset + start_offset; 1472 nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1; 1473 1474 g_io_ranges[g_io_range_idx].lba = lba; 1475 g_io_ranges[g_io_range_idx].nblocks = nblocks; 1476 1477 SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE); 1478 g_io_range_idx++; 1479 } 1480 } 1481 } 1482 } 1483 1484 static void 1485 raid_bdev_io_generate(void) 1486 { 1487 uint64_t n_strips; 1488 uint64_t n_strips_span = g_max_base_drives; 1489 uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1, 1490 g_max_base_drives * 2, g_max_base_drives * 3, 1491 g_max_base_drives * 4 1492 }; 1493 uint32_t i; 1494 1495 g_io_range_idx = 0; 1496 1497 /* consider different number of strips from 1 to strips spanned base bdevs, 1498 * and even to times of strips spanned base bdevs 1499 */ 1500 for (n_strips = 1; n_strips < n_strips_span; n_strips++) { 1501 raid_bdev_io_generate_by_strips(n_strips); 1502 } 1503 1504 for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) { 1505 n_strips = n_strips_times[i]; 1506 raid_bdev_io_generate_by_strips(n_strips); 1507 } 1508 } 1509 1510 static void 1511 test_unmap_io(void) 1512 { 1513 struct rpc_bdev_raid_create req; 1514 struct rpc_bdev_raid_delete destroy_req; 1515 struct raid_bdev *pbdev; 1516 struct spdk_io_channel *ch; 1517 struct raid_bdev_io_channel *ch_ctx; 1518 uint8_t i; 1519 struct spdk_bdev_io *bdev_io; 1520 uint32_t count; 1521 uint64_t io_len; 1522 uint64_t lba; 1523 1524 set_globals(); 1525 CU_ASSERT(raid_bdev_init() == 0); 1526 1527 verify_raid_bdev_present("raid1", false); 1528 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1529 rpc_bdev_raid_create(NULL, NULL); 1530 CU_ASSERT(g_rpc_err == 0); 1531 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1532 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1533 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1534 break; 1535 } 1536 } 1537 CU_ASSERT(pbdev != NULL); 1538 ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); 1539 SPDK_CU_ASSERT_FATAL(ch != NULL); 1540 ch_ctx = spdk_io_channel_get_ctx(ch); 1541 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1542 1543 CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); 1544 for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { 1545 SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); 1546 } 1547 1548 CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true); 1549 CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true); 1550 1551 raid_bdev_io_generate(); 1552 for (count = 0; count < g_io_range_idx; count++) { 1553 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1554 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1555 io_len = g_io_ranges[count].nblocks; 1556 lba = g_io_ranges[count].lba; 1557 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP); 1558 memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output)); 1559 g_io_output_index = 0; 1560 raid_bdev_submit_request(ch, bdev_io); 1561 verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1562 g_child_io_status_flag); 1563 bdev_io_cleanup(bdev_io); 1564 } 1565 free_test_req(&req); 1566 1567 raid_bdev_destroy_cb(pbdev, ch_ctx); 1568 CU_ASSERT(ch_ctx->base_channel == NULL); 1569 free(ch); 1570 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1571 rpc_bdev_raid_delete(NULL, NULL); 1572 CU_ASSERT(g_rpc_err == 0); 1573 verify_raid_bdev_present("raid1", false); 1574 1575 raid_bdev_exit(); 1576 base_bdevs_cleanup(); 1577 reset_globals(); 1578 } 1579 1580 /* Test IO failures */ 1581 static void 1582 test_io_failure(void) 1583 { 1584 struct rpc_bdev_raid_create req; 1585 struct rpc_bdev_raid_delete destroy_req; 1586 struct raid_bdev *pbdev; 1587 struct spdk_io_channel *ch; 1588 struct raid_bdev_io_channel *ch_ctx; 1589 uint8_t i; 1590 struct spdk_bdev_io *bdev_io; 1591 uint32_t count; 1592 uint64_t io_len; 1593 uint64_t lba; 1594 1595 set_globals(); 1596 CU_ASSERT(raid_bdev_init() == 0); 1597 1598 verify_raid_bdev_present("raid1", false); 1599 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1600 rpc_bdev_raid_create(NULL, NULL); 1601 CU_ASSERT(g_rpc_err == 0); 1602 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1603 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1604 if (strcmp(pbdev->bdev.name, req.name) == 0) { 1605 break; 1606 } 1607 } 1608 CU_ASSERT(pbdev != NULL); 1609 ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); 1610 SPDK_CU_ASSERT_FATAL(ch != NULL); 1611 ch_ctx = spdk_io_channel_get_ctx(ch); 1612 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1613 1614 CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); 1615 for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { 1616 CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); 1617 } 1618 free_test_req(&req); 1619 1620 lba = 0; 1621 for (count = 0; count < 1; count++) { 1622 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1623 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1624 io_len = (g_strip_size / 2) << count; 1625 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID); 1626 lba += g_strip_size; 1627 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1628 g_io_output_index = 0; 1629 raid_bdev_submit_request(ch, bdev_io); 1630 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1631 INVALID_IO_SUBMIT); 1632 bdev_io_cleanup(bdev_io); 1633 } 1634 1635 1636 lba = 0; 1637 g_child_io_status_flag = false; 1638 for (count = 0; count < 1; count++) { 1639 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1640 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1641 io_len = (g_strip_size / 2) << count; 1642 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE); 1643 lba += g_strip_size; 1644 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1645 g_io_output_index = 0; 1646 raid_bdev_submit_request(ch, bdev_io); 1647 verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1648 g_child_io_status_flag); 1649 bdev_io_cleanup(bdev_io); 1650 } 1651 1652 raid_bdev_destroy_cb(pbdev, ch_ctx); 1653 CU_ASSERT(ch_ctx->base_channel == NULL); 1654 free(ch); 1655 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1656 rpc_bdev_raid_delete(NULL, NULL); 1657 CU_ASSERT(g_rpc_err == 0); 1658 verify_raid_bdev_present("raid1", false); 1659 1660 raid_bdev_exit(); 1661 base_bdevs_cleanup(); 1662 reset_globals(); 1663 } 1664 1665 /* Test reset IO */ 1666 static void 1667 test_reset_io(void) 1668 { 1669 struct rpc_bdev_raid_create req; 1670 struct rpc_bdev_raid_delete destroy_req; 1671 struct raid_bdev *pbdev; 1672 struct spdk_io_channel *ch; 1673 struct raid_bdev_io_channel *ch_ctx; 1674 uint8_t i; 1675 struct spdk_bdev_io *bdev_io; 1676 1677 set_globals(); 1678 CU_ASSERT(raid_bdev_init() == 0); 1679 1680 verify_raid_bdev_present("raid1", false); 1681 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1682 rpc_bdev_raid_create(NULL, NULL); 1683 CU_ASSERT(g_rpc_err == 0); 1684 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1685 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1686 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1687 break; 1688 } 1689 } 1690 CU_ASSERT(pbdev != NULL); 1691 ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); 1692 SPDK_CU_ASSERT_FATAL(ch != NULL); 1693 ch_ctx = spdk_io_channel_get_ctx(ch); 1694 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1695 1696 SPDK_CU_ASSERT_FATAL(raid_bdev_create_cb(pbdev, ch_ctx) == 0); 1697 for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) { 1698 CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel); 1699 } 1700 free_test_req(&req); 1701 1702 g_bdev_io_submit_status = 0; 1703 g_child_io_status_flag = true; 1704 1705 CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true); 1706 1707 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1708 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1709 bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET); 1710 memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output)); 1711 g_io_output_index = 0; 1712 raid_bdev_submit_request(ch, bdev_io); 1713 verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev, 1714 true); 1715 bdev_io_cleanup(bdev_io); 1716 1717 raid_bdev_destroy_cb(pbdev, ch_ctx); 1718 CU_ASSERT(ch_ctx->base_channel == NULL); 1719 free(ch); 1720 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1721 rpc_bdev_raid_delete(NULL, NULL); 1722 CU_ASSERT(g_rpc_err == 0); 1723 verify_raid_bdev_present("raid1", false); 1724 1725 raid_bdev_exit(); 1726 base_bdevs_cleanup(); 1727 reset_globals(); 1728 } 1729 1730 /* Create multiple raids, destroy raids without IO, get_raids related tests */ 1731 static void 1732 test_multi_raid_no_io(void) 1733 { 1734 struct rpc_bdev_raid_create *construct_req; 1735 struct rpc_bdev_raid_delete destroy_req; 1736 struct rpc_bdev_raid_get_bdevs get_raids_req; 1737 uint8_t i; 1738 char name[16]; 1739 uint8_t bbdev_idx = 0; 1740 1741 set_globals(); 1742 construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create)); 1743 SPDK_CU_ASSERT_FATAL(construct_req != NULL); 1744 CU_ASSERT(raid_bdev_init() == 0); 1745 for (i = 0; i < g_max_raids; i++) { 1746 snprintf(name, 16, "%s%u", "raid", i); 1747 verify_raid_bdev_present(name, false); 1748 create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false); 1749 bbdev_idx += g_max_base_drives; 1750 rpc_bdev_raid_create(NULL, NULL); 1751 CU_ASSERT(g_rpc_err == 0); 1752 verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE); 1753 } 1754 1755 create_get_raids_req(&get_raids_req, "all", 0); 1756 rpc_bdev_raid_get_bdevs(NULL, NULL); 1757 CU_ASSERT(g_rpc_err == 0); 1758 verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count); 1759 for (i = 0; i < g_get_raids_count; i++) { 1760 free(g_get_raids_output[i]); 1761 } 1762 1763 create_get_raids_req(&get_raids_req, "online", 0); 1764 rpc_bdev_raid_get_bdevs(NULL, NULL); 1765 CU_ASSERT(g_rpc_err == 0); 1766 verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count); 1767 for (i = 0; i < g_get_raids_count; i++) { 1768 free(g_get_raids_output[i]); 1769 } 1770 1771 create_get_raids_req(&get_raids_req, "configuring", 0); 1772 rpc_bdev_raid_get_bdevs(NULL, NULL); 1773 CU_ASSERT(g_rpc_err == 0); 1774 CU_ASSERT(g_get_raids_count == 0); 1775 1776 create_get_raids_req(&get_raids_req, "offline", 0); 1777 rpc_bdev_raid_get_bdevs(NULL, NULL); 1778 CU_ASSERT(g_rpc_err == 0); 1779 CU_ASSERT(g_get_raids_count == 0); 1780 1781 create_get_raids_req(&get_raids_req, "invalid_category", 0); 1782 rpc_bdev_raid_get_bdevs(NULL, NULL); 1783 CU_ASSERT(g_rpc_err == 1); 1784 CU_ASSERT(g_get_raids_count == 0); 1785 1786 create_get_raids_req(&get_raids_req, "all", 1); 1787 rpc_bdev_raid_get_bdevs(NULL, NULL); 1788 CU_ASSERT(g_rpc_err == 1); 1789 free(get_raids_req.category); 1790 CU_ASSERT(g_get_raids_count == 0); 1791 1792 create_get_raids_req(&get_raids_req, "all", 0); 1793 rpc_bdev_raid_get_bdevs(NULL, NULL); 1794 CU_ASSERT(g_rpc_err == 0); 1795 CU_ASSERT(g_get_raids_count == g_max_raids); 1796 for (i = 0; i < g_get_raids_count; i++) { 1797 free(g_get_raids_output[i]); 1798 } 1799 1800 for (i = 0; i < g_max_raids; i++) { 1801 SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL); 1802 snprintf(name, 16, "%s", construct_req[i].name); 1803 create_raid_bdev_delete_req(&destroy_req, name, 0); 1804 rpc_bdev_raid_delete(NULL, NULL); 1805 CU_ASSERT(g_rpc_err == 0); 1806 verify_raid_bdev_present(name, false); 1807 } 1808 raid_bdev_exit(); 1809 for (i = 0; i < g_max_raids; i++) { 1810 free_test_req(&construct_req[i]); 1811 } 1812 free(construct_req); 1813 base_bdevs_cleanup(); 1814 reset_globals(); 1815 } 1816 1817 /* Create multiple raids, fire IOs on raids */ 1818 static void 1819 test_multi_raid_with_io(void) 1820 { 1821 struct rpc_bdev_raid_create *construct_req; 1822 struct rpc_bdev_raid_delete destroy_req; 1823 uint8_t i, j; 1824 char name[16]; 1825 uint8_t bbdev_idx = 0; 1826 struct raid_bdev *pbdev; 1827 struct spdk_io_channel *ch; 1828 struct raid_bdev_io_channel *ch_ctx = NULL; 1829 struct spdk_bdev_io *bdev_io; 1830 uint64_t io_len; 1831 uint64_t lba = 0; 1832 int16_t iotype; 1833 struct spdk_io_channel *ch_b; 1834 struct spdk_bdev_channel *ch_b_ctx; 1835 1836 set_globals(); 1837 construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create)); 1838 SPDK_CU_ASSERT_FATAL(construct_req != NULL); 1839 CU_ASSERT(raid_bdev_init() == 0); 1840 ch = calloc(g_max_raids, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel)); 1841 SPDK_CU_ASSERT_FATAL(ch != NULL); 1842 1843 ch_b = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct spdk_bdev_channel)); 1844 SPDK_CU_ASSERT_FATAL(ch_b != NULL); 1845 ch_b_ctx = spdk_io_channel_get_ctx(ch_b); 1846 ch_b_ctx->channel = ch; 1847 1848 for (i = 0; i < g_max_raids; i++) { 1849 snprintf(name, 16, "%s%u", "raid", i); 1850 verify_raid_bdev_present(name, false); 1851 create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false); 1852 bbdev_idx += g_max_base_drives; 1853 rpc_bdev_raid_create(NULL, NULL); 1854 CU_ASSERT(g_rpc_err == 0); 1855 verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE); 1856 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1857 if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) { 1858 break; 1859 } 1860 } 1861 CU_ASSERT(pbdev != NULL); 1862 ch_ctx = spdk_io_channel_get_ctx(&ch[i]); 1863 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1864 CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0); 1865 SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL); 1866 for (j = 0; j < construct_req[i].base_bdevs.num_base_bdevs; j++) { 1867 CU_ASSERT(ch_ctx->base_channel[j] == &g_io_channel); 1868 } 1869 } 1870 1871 /* This will perform a write on the first raid and a read on the second. It can be 1872 * expanded in the future to perform r/w on each raid device in the event that 1873 * multiple raid levels are supported. 1874 */ 1875 for (i = 0; i < g_max_raids; i++) { 1876 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)); 1877 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1878 io_len = g_strip_size; 1879 iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ; 1880 memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output)); 1881 g_io_output_index = 0; 1882 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1883 if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) { 1884 break; 1885 } 1886 } 1887 bdev_io_initialize(bdev_io, ch_b, &pbdev->bdev, lba, io_len, iotype); 1888 CU_ASSERT(pbdev != NULL); 1889 raid_bdev_submit_request(ch, bdev_io); 1890 verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev, 1891 g_child_io_status_flag); 1892 bdev_io_cleanup(bdev_io); 1893 } 1894 1895 for (i = 0; i < g_max_raids; i++) { 1896 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1897 if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) { 1898 break; 1899 } 1900 } 1901 CU_ASSERT(pbdev != NULL); 1902 ch_ctx = spdk_io_channel_get_ctx(&ch[i]); 1903 SPDK_CU_ASSERT_FATAL(ch_ctx != NULL); 1904 raid_bdev_destroy_cb(pbdev, ch_ctx); 1905 CU_ASSERT(ch_ctx->base_channel == NULL); 1906 snprintf(name, 16, "%s", construct_req[i].name); 1907 create_raid_bdev_delete_req(&destroy_req, name, 0); 1908 rpc_bdev_raid_delete(NULL, NULL); 1909 CU_ASSERT(g_rpc_err == 0); 1910 verify_raid_bdev_present(name, false); 1911 } 1912 raid_bdev_exit(); 1913 for (i = 0; i < g_max_raids; i++) { 1914 free_test_req(&construct_req[i]); 1915 } 1916 free(construct_req); 1917 free(ch); 1918 free(ch_b); 1919 base_bdevs_cleanup(); 1920 reset_globals(); 1921 } 1922 1923 static void 1924 test_io_type_supported(void) 1925 { 1926 CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true); 1927 CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true); 1928 CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false); 1929 } 1930 1931 static void 1932 test_raid_json_dump_info(void) 1933 { 1934 struct rpc_bdev_raid_create req; 1935 struct rpc_bdev_raid_delete destroy_req; 1936 struct raid_bdev *pbdev; 1937 1938 set_globals(); 1939 CU_ASSERT(raid_bdev_init() == 0); 1940 1941 verify_raid_bdev_present("raid1", false); 1942 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false); 1943 rpc_bdev_raid_create(NULL, NULL); 1944 CU_ASSERT(g_rpc_err == 0); 1945 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 1946 1947 TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) { 1948 if (strcmp(pbdev->bdev.name, "raid1") == 0) { 1949 break; 1950 } 1951 } 1952 CU_ASSERT(pbdev != NULL); 1953 1954 CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0); 1955 1956 free_test_req(&req); 1957 1958 create_raid_bdev_delete_req(&destroy_req, "raid1", 0); 1959 rpc_bdev_raid_delete(NULL, NULL); 1960 CU_ASSERT(g_rpc_err == 0); 1961 verify_raid_bdev_present("raid1", false); 1962 1963 raid_bdev_exit(); 1964 base_bdevs_cleanup(); 1965 reset_globals(); 1966 } 1967 1968 static void 1969 test_context_size(void) 1970 { 1971 CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io)); 1972 } 1973 1974 static void 1975 test_raid_level_conversions(void) 1976 { 1977 const char *raid_str; 1978 1979 CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL); 1980 CU_ASSERT(raid_bdev_str_to_level("0") == RAID0); 1981 CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0); 1982 CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0); 1983 1984 raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL); 1985 CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0); 1986 raid_str = raid_bdev_level_to_str(1234); 1987 CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0); 1988 raid_str = raid_bdev_level_to_str(RAID0); 1989 CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0); 1990 } 1991 1992 static void 1993 test_create_raid_superblock(void) 1994 { 1995 struct rpc_bdev_raid_create req; 1996 struct rpc_bdev_raid_delete delete_req; 1997 1998 set_globals(); 1999 CU_ASSERT(raid_bdev_init() == 0); 2000 2001 verify_raid_bdev_present("raid1", false); 2002 create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true); 2003 rpc_bdev_raid_create(NULL, NULL); 2004 CU_ASSERT(g_rpc_err == 0); 2005 verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE); 2006 free_test_req(&req); 2007 2008 create_raid_bdev_delete_req(&delete_req, "raid1", 0); 2009 rpc_bdev_raid_delete(NULL, NULL); 2010 CU_ASSERT(g_rpc_err == 0); 2011 raid_bdev_exit(); 2012 base_bdevs_cleanup(); 2013 reset_globals(); 2014 2015 } 2016 2017 int 2018 main(int argc, char **argv) 2019 { 2020 CU_pSuite suite = NULL; 2021 unsigned int num_failures; 2022 2023 CU_initialize_registry(); 2024 2025 suite = CU_add_suite("raid", NULL, NULL); 2026 2027 CU_ADD_TEST(suite, test_create_raid); 2028 CU_ADD_TEST(suite, test_create_raid_superblock); 2029 CU_ADD_TEST(suite, test_delete_raid); 2030 CU_ADD_TEST(suite, test_create_raid_invalid_args); 2031 CU_ADD_TEST(suite, test_delete_raid_invalid_args); 2032 CU_ADD_TEST(suite, test_io_channel); 2033 CU_ADD_TEST(suite, test_reset_io); 2034 CU_ADD_TEST(suite, test_write_io); 2035 CU_ADD_TEST(suite, test_read_io); 2036 CU_ADD_TEST(suite, test_unmap_io); 2037 CU_ADD_TEST(suite, test_io_failure); 2038 CU_ADD_TEST(suite, test_multi_raid_no_io); 2039 CU_ADD_TEST(suite, test_multi_raid_with_io); 2040 CU_ADD_TEST(suite, test_io_type_supported); 2041 CU_ADD_TEST(suite, test_raid_json_dump_info); 2042 CU_ADD_TEST(suite, test_context_size); 2043 CU_ADD_TEST(suite, test_raid_level_conversions); 2044 2045 allocate_threads(1); 2046 set_thread(0); 2047 2048 set_test_opts(); 2049 num_failures = spdk_ut_run_tests(argc, argv, NULL); 2050 CU_cleanup_registry(); 2051 2052 free_threads(); 2053 2054 return num_failures; 2055 } 2056