/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "bdev/raid/bdev_raid.c"
#include "bdev/raid/bdev_raid_rpc.c"
#include "bdev/raid/raid0.c"
#include "common/lib/ut_multithread.c"

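/*
 * Test geometry limits. MAX_TEST_IO_RANGE sizes the g_io_ranges array for
 * raid_bdev_io_generate(): each raid_bdev_io_generate_by_strips() call emits
 * up to 3 * 3 * 3 = 27 ranges (start offset x end offset x starting bdev
 * index), and on the order of MAX_BASE_DRIVES + 5 strip counts are exercised.
 */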
#define MAX_BASE_DRIVES 32
#define MAX_RAIDS 2
#define INVALID_IO_SUBMIT 0xFFFF
#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)

struct spdk_bdev_channel {
	struct spdk_io_channel *channel;
};

struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};

/* Data structure to capture the output of IO for verification */
struct io_output {
	struct spdk_bdev_desc       *desc;
	struct spdk_io_channel      *ch;
	uint64_t                    offset_blocks;
	uint64_t                    num_blocks;
	spdk_bdev_io_completion_cb  cb;
	void                        *cb_arg;
	enum spdk_bdev_io_type      iotype;
};

struct raid_io_ranges {
	uint64_t lba;
	uint64_t nblocks;
};

/* Globals */
int g_bdev_io_submit_status;
struct io_output *g_io_output = NULL;
uint32_t g_io_output_index;
uint32_t g_io_comp_status;
bool g_child_io_status_flag;
void *g_rpc_req;
uint32_t g_rpc_req_size;
TAILQ_HEAD(bdev, spdk_bdev);
struct bdev g_bdev_list;
TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
struct waitq g_io_waitq;
uint32_t g_block_len;
uint32_t g_strip_size;
uint32_t g_max_io_size;
uint8_t g_max_base_drives;
uint8_t g_max_raids;
uint8_t g_ignore_io_output;
uint8_t g_rpc_err;
char *g_get_raids_output[MAX_RAIDS];
uint32_t g_get_raids_count;
uint8_t g_json_decode_obj_err;
uint8_t g_json_decode_obj_create;
uint8_t g_config_level_create = 0;
uint8_t g_test_multi_raids;
struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
uint32_t g_io_range_idx;
uint64_t g_lba_offset;
uint64_t g_bdev_ch_io_device;
bool g_bdev_io_defer_completion;
TAILQ_HEAD(, spdk_bdev_io) g_deferred_ios = TAILQ_HEAD_INITIALIZER(g_deferred_ios);

DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp),
	    NULL);
DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
		uint32_t state_mask));
DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
					struct spdk_json_write_ctx *w));
DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request,
		bool value));
DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values,
		spdk_json_decode_fn decode_func,
		void *out, size_t max_size, size_t *out_size, size_t stride), 0);
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
		uint64_t val), 0);
DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev,
		struct spdk_memory_domain **domains, int array_size), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev");
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_is_md_separate, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type, (const struct spdk_bdev *bdev),
	    SPDK_DIF_DISABLE);
DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0);
DEFINE_STUB_V(raid_bdev_init_superblock, (struct raid_bdev *raid_bdev));

uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	return g_block_len;
}

int
raid_bdev_load_base_bdev_superblock(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				    raid_bdev_load_sb_cb cb, void *cb_ctx)
{
	cb(NULL, -EINVAL, cb_ctx);

	return 0;
}
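/*
 * Failing the superblock load with -EINVAL makes every base bdev look like it
 * carries no valid superblock, which presumably keeps examine in the
 * non-superblock path for these tests.
 */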

void
raid_bdev_write_superblock(struct raid_bdev *raid_bdev, raid_bdev_write_sb_cb cb, void *cb_ctx)
{
	cb(0, raid_bdev, cb_ctx);
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
{
	return spdk_get_io_channel(&g_bdev_ch_io_device);
}
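/*
 * Every base bdev descriptor shares one I/O channel keyed by the dummy
 * g_bdev_ch_io_device, keeping channel bookkeeping trivial for the tests.
 */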

static void
set_test_opts(void)
{
	g_max_base_drives = MAX_BASE_DRIVES;
	g_max_raids = MAX_RAIDS;
	g_block_len = 4096;
	g_strip_size = 64;
	g_max_io_size = 1024;

	printf("Test Options\n");
	printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
	       "g_max_raids = %u\n",
	       g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids);
}

/* Set globals before every test run */
static void
set_globals(void)
{
	uint32_t max_splits;

	g_bdev_io_submit_status = 0;
	if (g_max_io_size < g_strip_size) {
		max_splits = 2;
	} else {
		max_splits = (g_max_io_size / g_strip_size) + 1;
	}
	if (max_splits < g_max_base_drives) {
		max_splits = g_max_base_drives;
	}

	g_io_output = calloc(max_splits, sizeof(struct io_output));
	SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
	g_io_output_index = 0;
	memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
	g_get_raids_count = 0;
	g_io_comp_status = 0;
	g_ignore_io_output = 0;
	g_config_level_create = 0;
	g_rpc_err = 0;
	g_test_multi_raids = 0;
	g_child_io_status_flag = true;
	TAILQ_INIT(&g_bdev_list);
	TAILQ_INIT(&g_io_waitq);
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
	g_json_decode_obj_err = 0;
	g_json_decode_obj_create = 0;
	g_lba_offset = 0;
	g_bdev_io_defer_completion = false;
}

static void
base_bdevs_cleanup(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev *bdev_next;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
			free(bdev->name);
			TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
			free(bdev);
		}
	}
}

static void
check_and_remove_raid_bdev(struct raid_bdev *raid_bdev)
{
	struct raid_base_bdev_info *base_info;

	assert(raid_bdev != NULL);
	assert(raid_bdev->base_bdev_info != NULL);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		if (base_info->desc) {
			raid_bdev_free_base_bdev_resource(base_info);
		}
	}
	assert(raid_bdev->num_base_bdevs_discovered == 0);
	raid_bdev_cleanup_and_free(raid_bdev);
}

/* Reset globals */
static void
reset_globals(void)
{
	if (g_io_output) {
		free(g_io_output);
		g_io_output = NULL;
	}
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
		     uint64_t len)
{
	cb(bdev_io->internal.ch->channel, bdev_io, true);
}

/* Store the IO completion status in a global variable so individual tests can verify it */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
}

static void
set_io_output(struct io_output *output,
	      struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	      uint64_t offset_blocks, uint64_t num_blocks,
	      spdk_bdev_io_completion_cb cb, void *cb_arg,
	      enum spdk_bdev_io_type iotype)
{
	output->desc = desc;
	output->ch = ch;
	output->offset_blocks = offset_blocks;
	output->num_blocks = num_blocks;
	output->cb = cb;
	output->cb_arg = cb_arg;
	output->iotype = iotype;
}

static void
child_io_complete(struct spdk_bdev_io *child_io, spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_bdev_io_defer_completion) {
		child_io->internal.cb = cb;
		child_io->internal.caller_ctx = cb_arg;
		TAILQ_INSERT_TAIL(&g_deferred_ios, child_io, internal.link);
	} else {
		cb(child_io, g_child_io_status_flag, cb_arg);
	}
}

static void
complete_deferred_ios(void)
{
	struct spdk_bdev_io *child_io, *tmp;

	TAILQ_FOREACH_SAFE(child_io, &g_deferred_ios, internal.link, tmp) {
		TAILQ_REMOVE(&g_deferred_ios, child_io, internal.link);
		child_io->internal.cb(child_io, g_child_io_status_flag, child_io->internal.caller_ctx);
	}
}
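/*
 * Deferred-completion usage, for tests that need child IOs to stay
 * outstanding for a while (a minimal sketch, not taken from a specific test):
 *
 *	g_bdev_io_defer_completion = true;
 *	raid_bdev_submit_request(ch, bdev_io);
 *	// child IOs are now parked on g_deferred_ios
 *	complete_deferred_ios();
 */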

/* Cache the split IOs for later verification */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_max_io_size < g_strip_size) {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
	} else {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
	}
	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_WRITE);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt,
			    uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_UNMAP);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

void
spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
{
	CU_ASSERT(bdeverrno == 0);
	SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL);
	bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
	return 0;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int ret;

	SPDK_CU_ASSERT_FATAL(spdk_bdev_get_by_name(bdev->name) == bdev);
	TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);

	bdev->internal.unregister_cb = cb_fn;
	bdev->internal.unregister_ctx = cb_arg;

	ret = bdev->fn_table->destruct(bdev->ctxt);
	CU_ASSERT(ret == 1);

	poll_threads();
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
		   void *event_ctx, struct spdk_bdev_desc **_desc)
{
	struct spdk_bdev *bdev;

	bdev = spdk_bdev_get_by_name(bdev_name);
	if (bdev == NULL) {
		return -ENODEV;
	}

	*_desc = (void *)bdev;
	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (void *)desc;
}

int
spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "strip_size_kb") == 0) {
			CU_ASSERT(req->strip_size_kb == val);
		} else if (strcmp(name, "blocklen_shift") == 0) {
			CU_ASSERT(spdk_u32log2(g_block_len) == val);
		} else if (strcmp(name, "num_base_bdevs") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		} else if (strcmp(name, "state") == 0) {
			CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
		} else if (strcmp(name, "destruct_called") == 0) {
			CU_ASSERT(val == 0);
		} else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		}
	}
	return 0;
}
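/*
 * The spdk_json_write_named_* stubs double as verification hooks: in
 * single-raid tests they compare each field being written against the
 * originating g_rpc_req instead of emitting JSON.
 */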

int
spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
{
	if (g_test_multi_raids) {
		if (strcmp(name, "name") == 0) {
			g_get_raids_output[g_get_raids_count] = strdup(val);
			SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
			g_get_raids_count++;
		}
	} else {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "raid_level") == 0) {
			CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0);
		}
	}
	return 0;
}

int
spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "superblock") == 0) {
			CU_ASSERT(val == req->superblock_enabled);
		}
	}
	return 0;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io) {
		free(bdev_io);
	}
}

/* Cache the split IOs for later verification */
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_READ);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt,
			   uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

void
spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
{
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module != NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
	bdev->internal.claim.v1.module = NULL;
}

int
spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_bdev_module *module)
{
	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
		CU_ASSERT(bdev->internal.claim.v1.module != NULL);
		return -1;
	}
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
	bdev->internal.claim.v1.module = module;
	return 0;
}

int
spdk_json_decode_object(const struct spdk_json_val *values,
			const struct spdk_json_object_decoder *decoders, size_t num_decoders,
			void *out)
{
	struct rpc_bdev_raid_create *req, *_out;
	size_t i;

	if (g_json_decode_obj_err) {
		return -1;
	} else if (g_json_decode_obj_create) {
		req = g_rpc_req;
		_out = out;

		_out->name = strdup(req->name);
		SPDK_CU_ASSERT_FATAL(_out->name != NULL);
		_out->strip_size_kb = req->strip_size_kb;
		_out->level = req->level;
		_out->superblock_enabled = req->superblock_enabled;
		_out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
		for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
			_out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
			SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]);
		}
	} else {
		memcpy(out, g_rpc_req, g_rpc_req_size);
	}

	return 0;
}
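/*
 * The decode stub has three modes selected by globals: fail outright
 * (g_json_decode_obj_err), deep-copy a create request field by field
 * (g_json_decode_obj_create), or memcpy() the raw request for the simpler
 * delete/get RPCs.
 */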

struct spdk_json_write_ctx *
spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
{
	return (void *)1;
}

void
spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
				 int error_code, const char *msg)
{
	g_rpc_err = 1;
}

void
spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
				     int error_code, const char *fmt, ...)
{
	g_rpc_err = 1;
}

struct spdk_bdev *
spdk_bdev_get_by_name(const char *bdev_name)
{
	struct spdk_bdev *bdev;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
			if (strcmp(bdev_name, bdev->name) == 0) {
				return bdev;
			}
		}
	}

	return NULL;
}

int
spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		    spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			uint64_t offset, uint64_t length,
			spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			  uint64_t offset, uint64_t length,
			  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

static void
bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->u.bdev.iovs) {
		int i;

		for (i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
			free(bdev_io->u.bdev.iovs[i].iov_base);
		}
		free(bdev_io->u.bdev.iovs);
	}
	free(bdev_io);
}

static void
_bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch,
		    struct spdk_bdev *bdev, uint64_t lba, uint64_t blocks, int16_t iotype,
		    int iovcnt, size_t iov_len)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
	int i;

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;
	bdev_io->internal.ch = channel;
	bdev_io->u.bdev.iovcnt = iovcnt;

	if (iovcnt == 0) {
		bdev_io->u.bdev.iovs = NULL;
		return;
	}

	SPDK_CU_ASSERT_FATAL(iov_len * iovcnt == blocks * g_block_len);

	bdev_io->u.bdev.iovs = calloc(iovcnt, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);

	for (i = 0; i < iovcnt; i++) {
		struct iovec *iov = &bdev_io->u.bdev.iovs[i];

		iov->iov_base = calloc(1, iov_len);
		SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL);
		iov->iov_len = iov_len;
	}
}

static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	int iovcnt;
	size_t iov_len;

	/* Check the requested iotype, not bdev_io->type, which is still zero here */
	if (iotype == SPDK_BDEV_IO_TYPE_UNMAP || iotype == SPDK_BDEV_IO_TYPE_FLUSH) {
		iovcnt = 0;
		iov_len = 0;
	} else {
		iovcnt = 1;
		iov_len = blocks * g_block_len;
	}

	_bdev_io_initialize(bdev_io, ch, bdev, lba, blocks, iotype, iovcnt, iov_len);
}
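/*
 * Typical usage in the tests below (a sketch; the one-strip write size is
 * illustrative): allocate a bdev_io with trailing raid driver context,
 * describe the IO, then hand it to the raid module:
 *
 *	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
 *	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size,
 *			   SPDK_BDEV_IO_TYPE_WRITE);
 *	raid_bdev_submit_request(ch, bdev_io);
 */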

static void
verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
		struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint8_t index = 0;
	struct io_output *output;

	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT);
	SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);

	CU_ASSERT(g_io_output_index == num_base_drives);
	for (index = 0; index < g_io_output_index; index++) {
		output = &g_io_output[index];
		CU_ASSERT(ch_ctx->base_channel[index] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

static void
verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
	  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint32_t splits_reqd = (end_strip - start_strip + 1);
	uint32_t strip;
	uint64_t pd_strip;
	uint8_t pd_idx;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint32_t index = 0;
	struct io_output *output;

	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);

	CU_ASSERT(splits_reqd == g_io_output_index);
	for (strip = start_strip; strip <= end_strip; strip++, index++) {
		pd_strip = strip / num_base_drives;
		pd_idx = strip % num_base_drives;
		if (strip == start_strip) {
			offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
			pd_lba = (pd_strip << strip_shift) + offset_in_strip;
			if (strip == end_strip) {
				pd_blocks = bdev_io->u.bdev.num_blocks;
			} else {
				pd_blocks = g_strip_size - offset_in_strip;
			}
		} else if (strip == end_strip) {
			pd_lba = pd_strip << strip_shift;
			pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
				     (g_strip_size - 1)) + 1;
		} else {
			pd_lba = pd_strip << raid_bdev->strip_size_shift;
			pd_blocks = raid_bdev->strip_size;
		}
		output = &g_io_output[index];
		CU_ASSERT(pd_lba == output->offset_blocks);
		CU_ASSERT(pd_blocks == output->num_blocks);
		CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}
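/*
 * Worked example of the RAID0 mapping checked above, assuming 4 base drives
 * and g_strip_size = 64: an IO at LBA 32 spanning 64 blocks covers strips
 * 0-1, so it splits into two base IOs: drive 0 at pd_lba 32 for 32 blocks
 * and drive 1 at pd_lba 0 for 32 blocks.
 */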

static void
verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
			  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev,
			  uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size;
	uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) %
				       g_strip_size;
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint8_t n_disks_involved;
	uint64_t start_strip_disk_idx;
	uint64_t end_strip_disk_idx;
	uint64_t nblocks_in_start_disk;
	uint64_t offset_in_start_disk;
	uint8_t disk_idx;
	uint64_t base_io_idx;
	uint64_t sum_nblocks = 0;
	struct io_output *output;

	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE);

	n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
	CU_ASSERT(n_disks_involved == g_io_output_index);

	start_strip_disk_idx = start_strip % num_base_drives;
	end_strip_disk_idx = end_strip % num_base_drives;

	offset_in_start_disk = g_io_output[0].offset_blocks;
	nblocks_in_start_disk = g_io_output[0].num_blocks;

	for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
	     base_io_idx++, disk_idx++) {
		uint64_t start_offset_in_disk;
		uint64_t end_offset_in_disk;

		output = &g_io_output[base_io_idx];

		/* wrap disk_idx around the set of base drives */
		if (disk_idx >= num_base_drives) {
			disk_idx %= num_base_drives;
		}

		/* start_offset_in_disk aligned-in-strip check:
		 * The first base IO has the same start_offset_in_strip as the whole
		 * RAID IO. Every other base IO must start on a strip boundary,
		 * i.e. at offset 0.
		 */
		start_offset_in_disk = output->offset_blocks;
		if (base_io_idx == 0) {
			CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
		} else {
			CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
		}

		/* end_offset_in_disk aligned-in-strip check:
		 * The base IO on the disk holding end_strip has the same
		 * end_offset_in_strip as the whole RAID IO. Every other base IO
		 * must end on a strip boundary, i.e. at offset g_strip_size - 1.
		 */
		end_offset_in_disk = output->offset_blocks + output->num_blocks - 1;
		if (disk_idx == end_strip_disk_idx) {
			CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
		} else {
			CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
		}

		/* start_offset_in_disk compared with the start disk:
		 * 1. For a disk_idx larger than start_strip_disk_idx, its
		 *    start_offset_in_disk must not exceed offset_in_start_disk, and
		 *    the gap must be less than the strip size.
		 * 2. For a disk_idx smaller than start_strip_disk_idx, its
		 *    start_offset_in_disk must be larger than offset_in_start_disk,
		 *    and the gap must not exceed the strip size.
		 */
		if (disk_idx > start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
			CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
		} else if (disk_idx < start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
			CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size);
		}

		/* num_blocks compared with the start disk:
		 * the difference between them must be within one strip size.
		 */
		if (output->num_blocks <= nblocks_in_start_disk) {
			CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size);
		} else {
			CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size);
		}

		sum_nblocks += output->num_blocks;

		CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}

	/* The sum of the base IO block counts must equal the RAID bdev_io block count */
	CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks);

	CU_ASSERT(g_io_comp_status == io_status);
}

static void
verify_raid_bdev_present(const char *name, bool presence)
{
	struct raid_bdev *pbdev;
	bool   pbdev_found;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, name) == 0) {
			pbdev_found = true;
			break;
		}
	}
	if (presence == true) {
		CU_ASSERT(pbdev_found == true);
	} else {
		CU_ASSERT(pbdev_found == false);
	}
}

static void
verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state)
{
	struct raid_bdev *pbdev;
	struct raid_base_bdev_info *base_info;
	struct spdk_bdev *bdev = NULL;
	bool   pbdev_found;
	uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, r->name) == 0) {
			pbdev_found = true;
			if (presence == false) {
				break;
			}
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len));
			CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) /
					g_block_len)));
			CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len));
			CU_ASSERT((uint32_t)pbdev->state == raid_state);
			CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->level == r->level);
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) {
				CU_ASSERT(base_info->desc != NULL);
				bdev = spdk_bdev_desc_get_bdev(base_info->desc);
				CU_ASSERT(bdev != NULL);
				CU_ASSERT(base_info->remove_scheduled == false);
				CU_ASSERT((pbdev->sb != NULL && base_info->data_offset != 0) ||
					  (pbdev->sb == NULL && base_info->data_offset == 0));
				CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt);

				if (bdev && base_info->data_size < min_blockcnt) {
					min_blockcnt = base_info->data_size;
				}
			}
			CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) *
				    (r->strip_size_kb * 1024 / g_block_len)) *
				   r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
			CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0);
			CU_ASSERT(pbdev->bdev.write_cache == 0);
			CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
			if (pbdev->num_base_bdevs > 1) {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
			} else {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
			}
			CU_ASSERT(pbdev->bdev.ctxt == pbdev);
			CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
			CU_ASSERT(pbdev->bdev.module == &g_raid_if);
			break;
		}
	}
	if (presence == true) {
		CU_ASSERT(pbdev_found == true);
	} else {
		CU_ASSERT(pbdev_found == false);
	}
}

static void
verify_get_raids(struct rpc_bdev_raid_create *construct_req,
		 uint8_t g_max_raids,
		 char **g_get_raids_output, uint32_t g_get_raids_count)
{
	uint8_t i, j;
	bool found;

	CU_ASSERT(g_max_raids == g_get_raids_count);
	if (g_max_raids == g_get_raids_count) {
		for (i = 0; i < g_max_raids; i++) {
			found = false;
			for (j = 0; j < g_max_raids; j++) {
				if (construct_req[i].name &&
				    strcmp(construct_req[i].name, g_get_raids_output[j]) == 0) {
					found = true;
					break;
				}
			}
			CU_ASSERT(found == true);
		}
	}
}

static void
create_base_bdevs(uint32_t bbdev_start_idx)
{
	uint8_t i;
	struct spdk_bdev *base_bdev;
	char name[16];

	for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
		base_bdev = calloc(1, sizeof(struct spdk_bdev));
		SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
		base_bdev->name = strdup(name);
		spdk_uuid_generate(&base_bdev->uuid);
		SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
		base_bdev->blocklen = g_block_len;
		base_bdev->blockcnt = BLOCK_CNT;
		TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
	}
}

static void
create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name,
		uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled)
{
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = bbdev_start_idx;

	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);
	r->strip_size_kb = (g_strip_size * g_block_len) / 1024;
	r->level = RAID0;
	r->superblock_enabled = superblock_enabled;
	r->base_bdevs.num_base_bdevs = g_max_base_drives;
	for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
		r->base_bdevs.base_bdevs[i] = strdup(name);
		SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
	}
	if (create_base_bdev == true) {
		create_base_bdevs(bbdev_start_idx);
	}
	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
}

static void
create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name,
			    uint8_t bbdev_start_idx, bool create_base_bdev,
			    uint8_t json_decode_obj_err, bool superblock_enabled)
{
	create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock_enabled);

	g_rpc_err = 0;
	g_json_decode_obj_create = 1;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 0;
}

static void
free_test_req(struct rpc_bdev_raid_create *r)
{
	uint8_t i;

	free(r->name);
	for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
		free(r->base_bdevs.base_bdevs[i]);
	}
}

static void
create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name,
			    uint8_t json_decode_obj_err)
{
	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 0;
}

static void
create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category,
		     uint8_t json_decode_obj_err)
{
	r->category = strdup(category);
	SPDK_CU_ASSERT_FATAL(r->category != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 1;
	g_get_raids_count = 0;
}

static void
test_create_raid(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_create_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *raid_bdev;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	req.level = INVALID_RAID_LEVEL;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	req.strip_size_kb = 1231;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	raid_bdev = raid_bdev_find_by_name("raid2");
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	check_and_remove_raid_bdev(raid_bdev);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete destroy_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 1);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(destroy_req.name);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_io_channel(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	free_test_req(&req);

	spdk_put_io_channel(ch);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_write_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	uint8_t i;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba = 0;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* test 2 IO sizes based on global strip size set earlier */
	for (i = 0; i < 2; i++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << i;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_read_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	uint8_t i;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* test 2 IO sizes based on global strip size set earlier */
	lba = 0;
	for (i = 0; i < 2; i++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << i;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
raid_bdev_io_generate_by_strips(uint64_t n_strips)
{
	uint64_t lba;
	uint64_t nblocks;
	uint64_t start_offset;
	uint64_t end_offset;
	uint64_t offsets_in_strip[3];
	uint64_t start_bdev_idx;
	uint64_t start_bdev_offset;
	uint64_t start_bdev_idxs[3];
	int i, j, l;

	/* 3 different situations of offset in strip */
	offsets_in_strip[0] = 0;
	offsets_in_strip[1] = g_strip_size >> 1;
	offsets_in_strip[2] = g_strip_size - 1;

	/* 3 different situations of start_bdev_idx */
	start_bdev_idxs[0] = 0;
	start_bdev_idxs[1] = g_max_base_drives >> 1;
	start_bdev_idxs[2] = g_max_base_drives - 1;

	/* consider different offset in strip */
	for (i = 0; i < 3; i++) {
		start_offset = offsets_in_strip[i];
		for (j = 0; j < 3; j++) {
			end_offset = offsets_in_strip[j];
			if (n_strips == 1 && start_offset > end_offset) {
				continue;
			}

			/* consider on which base bdev the LBA starts */
			for (l = 0; l < 3; l++) {
				start_bdev_idx = start_bdev_idxs[l];
				start_bdev_offset = start_bdev_idx * g_strip_size;
				lba = g_lba_offset + start_bdev_offset + start_offset;
				nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;

				g_io_ranges[g_io_range_idx].lba = lba;
				g_io_ranges[g_io_range_idx].nblocks = nblocks;

				SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
				g_io_range_idx++;
			}
		}
	}
}
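/*
 * Each invocation above appends at most 27 ranges (3 start offsets x 3 end
 * offsets x 3 starting bdev indexes); the only combinations skipped are
 * single-strip ranges whose start offset would exceed their end offset.
 */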

static void
raid_bdev_io_generate(void)
{
	uint64_t n_strips;
	uint64_t n_strips_span = g_max_base_drives;
	uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1,
				      g_max_base_drives * 2, g_max_base_drives * 3,
				      g_max_base_drives * 4
				     };
	uint32_t i;

	g_io_range_idx = 0;

	/* consider strip counts from 1 up to the number of strips spanning all
	 * base bdevs, and then several counts of multiple spans
	 */
	for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
		raid_bdev_io_generate_by_strips(n_strips);
	}

	for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
		n_strips = n_strips_times[i];
		raid_bdev_io_generate_by_strips(n_strips);
	}
}
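/*
 * In total this produces up to 27 * (g_max_base_drives + 4) ranges, which is
 * why MAX_TEST_IO_RANGE is sized as 3 * 3 * 3 * (MAX_BASE_DRIVES + 5).
 */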
1593 
1594 static void
1595 test_unmap_io(void)
1596 {
1597 	struct rpc_bdev_raid_create req;
1598 	struct rpc_bdev_raid_delete destroy_req;
1599 	struct raid_bdev *pbdev;
1600 	struct spdk_io_channel *ch;
1601 	struct raid_bdev_io_channel *ch_ctx;
1602 	struct spdk_bdev_io *bdev_io;
1603 	uint32_t count;
1604 	uint64_t io_len;
1605 	uint64_t lba;
1606 
1607 	set_globals();
1608 	CU_ASSERT(raid_bdev_init() == 0);
1609 
1610 	verify_raid_bdev_present("raid1", false);
1611 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1612 	rpc_bdev_raid_create(NULL, NULL);
1613 	CU_ASSERT(g_rpc_err == 0);
1614 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1615 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1616 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1617 			break;
1618 		}
1619 	}
1620 	CU_ASSERT(pbdev != NULL);
1621 
1622 	ch = spdk_get_io_channel(pbdev);
1623 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1624 
1625 	ch_ctx = spdk_io_channel_get_ctx(ch);
1626 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1627 
1628 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
1629 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true);
1630 
1631 	raid_bdev_io_generate();
1632 	for (count = 0; count < g_io_range_idx; count++) {
1633 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1634 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1635 		io_len = g_io_ranges[count].nblocks;
1636 		lba = g_io_ranges[count].lba;
1637 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
1638 		memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
1639 		g_io_output_index = 0;
1640 		raid_bdev_submit_request(ch, bdev_io);
1641 		verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1642 					  g_child_io_status_flag);
1643 		bdev_io_cleanup(bdev_io);
1644 	}
1645 
1646 	free_test_req(&req);
1647 	spdk_put_io_channel(ch);
1648 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1649 	rpc_bdev_raid_delete(NULL, NULL);
1650 	CU_ASSERT(g_rpc_err == 0);
1651 	verify_raid_bdev_present("raid1", false);
1652 
1653 	raid_bdev_exit();
1654 	base_bdevs_cleanup();
1655 	reset_globals();
1656 }
1657 
1658 /* Test IO failures */
1659 static void
1660 test_io_failure(void)
1661 {
1662 	struct rpc_bdev_raid_create req;
1663 	struct rpc_bdev_raid_delete destroy_req;
1664 	struct raid_bdev *pbdev;
1665 	struct spdk_io_channel *ch;
1666 	struct raid_bdev_io_channel *ch_ctx;
1667 	struct spdk_bdev_io *bdev_io;
1668 	uint32_t count;
1669 	uint64_t io_len;
1670 	uint64_t lba;
1671 
1672 	set_globals();
1673 	CU_ASSERT(raid_bdev_init() == 0);
1674 
1675 	verify_raid_bdev_present("raid1", false);
1676 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1677 	rpc_bdev_raid_create(NULL, NULL);
1678 	CU_ASSERT(g_rpc_err == 0);
1679 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1680 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1681 		if (strcmp(pbdev->bdev.name, req.name) == 0) {
1682 			break;
1683 		}
1684 	}
1685 	CU_ASSERT(pbdev != NULL);
1686 
1687 	ch = spdk_get_io_channel(pbdev);
1688 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1689 
1690 	ch_ctx = spdk_io_channel_get_ctx(ch);
1691 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1692 
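	/* An IO of an invalid type must be failed at submission time, before any
	 * child IO is generated (checked via INVALID_IO_SUBMIT).
	 */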
1693 	lba = 0;
1694 	for (count = 0; count < 1; count++) {
1695 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1696 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1697 		io_len = (g_strip_size / 2) << count;
1698 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
1699 		lba += g_strip_size;
1700 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1701 		g_io_output_index = 0;
1702 		raid_bdev_submit_request(ch, bdev_io);
1703 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1704 			  INVALID_IO_SUBMIT);
1705 		bdev_io_cleanup(bdev_io);
1706 	}
1707 
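	/* Valid writes, but with child IO completions forced to fail; the parent
	 * IO status must reflect the failure.
	 */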
1709 	lba = 0;
1710 	g_child_io_status_flag = false;
1711 	for (count = 0; count < 1; count++) {
1712 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1713 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1714 		io_len = (g_strip_size / 2) << count;
1715 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
1716 		lba += g_strip_size;
1717 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1718 		g_io_output_index = 0;
1719 		raid_bdev_submit_request(ch, bdev_io);
1720 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1721 			  g_child_io_status_flag);
1722 		bdev_io_cleanup(bdev_io);
1723 	}
1724 
1725 	free_test_req(&req);
1726 	spdk_put_io_channel(ch);
1727 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1728 	rpc_bdev_raid_delete(NULL, NULL);
1729 	CU_ASSERT(g_rpc_err == 0);
1730 	verify_raid_bdev_present("raid1", false);
1731 
1732 	raid_bdev_exit();
1733 	base_bdevs_cleanup();
1734 	reset_globals();
1735 }
1736 
1737 /* Test reset IO */
1738 static void
1739 test_reset_io(void)
1740 {
1741 	struct rpc_bdev_raid_create req;
1742 	struct rpc_bdev_raid_delete destroy_req;
1743 	struct raid_bdev *pbdev;
1744 	struct spdk_io_channel *ch;
1745 	struct raid_bdev_io_channel *ch_ctx;
1746 	struct spdk_bdev_io *bdev_io;
1747 
1748 	set_globals();
1749 	CU_ASSERT(raid_bdev_init() == 0);
1750 
1751 	verify_raid_bdev_present("raid1", false);
1752 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1753 	rpc_bdev_raid_create(NULL, NULL);
1754 	CU_ASSERT(g_rpc_err == 0);
1755 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1756 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1757 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1758 			break;
1759 		}
1760 	}
1761 	CU_ASSERT(pbdev != NULL);
1762 
1763 	ch = spdk_get_io_channel(pbdev);
1764 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1765 
1766 	ch_ctx = spdk_io_channel_get_ctx(ch);
1767 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1768 
1769 	g_bdev_io_submit_status = 0;
1770 	g_child_io_status_flag = true;
1771 
1772 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);
1773 
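	/* A single reset is expected to reach every base bdev; verify_reset_io()
	 * checks one child reset per base bdev and a successful parent status.
	 */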
1774 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1775 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1776 	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
1777 	memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
1778 	g_io_output_index = 0;
1779 	raid_bdev_submit_request(ch, bdev_io);
1780 	verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1781 			true);
1782 	bdev_io_cleanup(bdev_io);
1783 
1784 	free_test_req(&req);
1785 	spdk_put_io_channel(ch);
1786 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1787 	rpc_bdev_raid_delete(NULL, NULL);
1788 	CU_ASSERT(g_rpc_err == 0);
1789 	verify_raid_bdev_present("raid1", false);
1790 
1791 	raid_bdev_exit();
1792 	base_bdevs_cleanup();
1793 	reset_globals();
1794 }
1795 
1796 /* Create multiple raids, exercise the get_raids RPC variants, then destroy the raids without any IO */
1797 static void
1798 test_multi_raid_no_io(void)
1799 {
1800 	struct rpc_bdev_raid_create *construct_req;
1801 	struct rpc_bdev_raid_delete destroy_req;
1802 	struct rpc_bdev_raid_get_bdevs get_raids_req;
1803 	uint8_t i;
1804 	char name[16];
1805 	uint8_t bbdev_idx = 0;
1806 
1807 	set_globals();
1808 	construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
1809 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
1810 	CU_ASSERT(raid_bdev_init() == 0);
1811 	for (i = 0; i < g_max_raids; i++) {
1812 		snprintf(name, 16, "%s%u", "raid", i);
1813 		verify_raid_bdev_present(name, false);
1814 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
1815 		bbdev_idx += g_max_base_drives;
1816 		rpc_bdev_raid_create(NULL, NULL);
1817 		CU_ASSERT(g_rpc_err == 0);
1818 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
1819 	}
1820 
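	/* Query the raid list with each category: "all" and "online" must report
	 * every raid created above, the remaining categories none.
	 */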
1821 	create_get_raids_req(&get_raids_req, "all", 0);
1822 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1823 	CU_ASSERT(g_rpc_err == 0);
1824 	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
1825 	for (i = 0; i < g_get_raids_count; i++) {
1826 		free(g_get_raids_output[i]);
1827 	}
1828 
1829 	create_get_raids_req(&get_raids_req, "online", 0);
1830 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1831 	CU_ASSERT(g_rpc_err == 0);
1832 	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
1833 	for (i = 0; i < g_get_raids_count; i++) {
1834 		free(g_get_raids_output[i]);
1835 	}
1836 
1837 	create_get_raids_req(&get_raids_req, "configuring", 0);
1838 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1839 	CU_ASSERT(g_rpc_err == 0);
1840 	CU_ASSERT(g_get_raids_count == 0);
1841 
1842 	create_get_raids_req(&get_raids_req, "offline", 0);
1843 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1844 	CU_ASSERT(g_rpc_err == 0);
1845 	CU_ASSERT(g_get_raids_count == 0);
1846 
1847 	create_get_raids_req(&get_raids_req, "invalid_category", 0);
1848 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1849 	CU_ASSERT(g_rpc_err == 1);
1850 	CU_ASSERT(g_get_raids_count == 0);
1851 
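	/* The non-zero third argument arms JSON decode error injection (see
	 * g_json_decode_obj_err), so the RPC should fail and the category string
	 * must be freed manually.
	 */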
1852 	create_get_raids_req(&get_raids_req, "all", 1);
1853 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1854 	CU_ASSERT(g_rpc_err == 1);
1855 	free(get_raids_req.category);
1856 	CU_ASSERT(g_get_raids_count == 0);
1857 
1858 	create_get_raids_req(&get_raids_req, "all", 0);
1859 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1860 	CU_ASSERT(g_rpc_err == 0);
1861 	CU_ASSERT(g_get_raids_count == g_max_raids);
1862 	for (i = 0; i < g_get_raids_count; i++) {
1863 		free(g_get_raids_output[i]);
1864 	}
1865 
1866 	for (i = 0; i < g_max_raids; i++) {
1867 		SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
1868 		snprintf(name, 16, "%s", construct_req[i].name);
1869 		create_raid_bdev_delete_req(&destroy_req, name, 0);
1870 		rpc_bdev_raid_delete(NULL, NULL);
1871 		CU_ASSERT(g_rpc_err == 0);
1872 		verify_raid_bdev_present(name, false);
1873 	}
1874 	raid_bdev_exit();
1875 	for (i = 0; i < g_max_raids; i++) {
1876 		free_test_req(&construct_req[i]);
1877 	}
1878 	free(construct_req);
1879 	base_bdevs_cleanup();
1880 	reset_globals();
1881 }
1882 
1883 /* Create multiple raids, fire IOs on raids */
1884 static void
1885 test_multi_raid_with_io(void)
1886 {
1887 	struct rpc_bdev_raid_create *construct_req;
1888 	struct rpc_bdev_raid_delete destroy_req;
1889 	uint8_t i;
1890 	char name[16];
1891 	uint8_t bbdev_idx = 0;
1892 	struct raid_bdev *pbdev;
1893 	struct spdk_io_channel **channels;
1894 	struct spdk_bdev_io *bdev_io;
1895 	uint64_t io_len;
1896 	uint64_t lba = 0;
1897 	enum spdk_bdev_io_type iotype;
1898 
1899 	set_globals();
1900 	construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create));
1901 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
1902 	CU_ASSERT(raid_bdev_init() == 0);
1903 	channels = calloc(g_max_raids, sizeof(*channels));
1904 	SPDK_CU_ASSERT_FATAL(channels != NULL);
1905 
1906 	for (i = 0; i < g_max_raids; i++) {
1907 		snprintf(name, 16, "%s%u", "raid", i);
1908 		verify_raid_bdev_present(name, false);
1909 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
1910 		bbdev_idx += g_max_base_drives;
1911 		rpc_bdev_raid_create(NULL, NULL);
1912 		CU_ASSERT(g_rpc_err == 0);
1913 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
1914 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1915 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
1916 				break;
1917 			}
1918 		}
1919 		CU_ASSERT(pbdev != NULL);
1920 
1921 		channels[i] = spdk_get_io_channel(pbdev);
1922 		SPDK_CU_ASSERT_FATAL(channels[i] != NULL);
1923 	}
1924 
1925 	/* This will perform a read on the first raid and a write on the second. It can be
1926 	 * expanded in the future to perform r/w on each raid device in the event that
1927 	 * multiple raid levels are supported.
1928 	 */
1929 	for (i = 0; i < g_max_raids; i++) {
1930 		struct spdk_io_channel *ch = channels[i];
1931 		struct raid_bdev_io_channel *ch_ctx = spdk_io_channel_get_ctx(ch);
1932 
1933 		SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1934 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1935 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1936 		io_len = g_strip_size;
1937 		iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
1938 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1939 		g_io_output_index = 0;
1940 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1941 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
1942 				break;
1943 			}
1944 		}
1945 		SPDK_CU_ASSERT_FATAL(pbdev != NULL);
1946 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, iotype);
1947 		raid_bdev_submit_request(ch, bdev_io);
1948 		verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
1949 			  g_child_io_status_flag);
1950 		bdev_io_cleanup(bdev_io);
1951 	}
1952 
1953 	for (i = 0; i < g_max_raids; i++) {
1954 		spdk_put_io_channel(channels[i]);
1955 		snprintf(name, 16, "%s", construct_req[i].name);
1956 		create_raid_bdev_delete_req(&destroy_req, name, 0);
1957 		rpc_bdev_raid_delete(NULL, NULL);
1958 		CU_ASSERT(g_rpc_err == 0);
1959 		verify_raid_bdev_present(name, false);
1960 	}
1961 	raid_bdev_exit();
1962 	for (i = 0; i < g_max_raids; i++) {
1963 		free_test_req(&construct_req[i]);
1964 	}
1965 	free(construct_req);
1966 	free(channels);
1967 	base_bdevs_cleanup();
1968 	reset_globals();
1969 }
1970 
1971 static void
1972 test_io_type_supported(void)
1973 {
1974 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
1975 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
1976 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
1977 }
1978 
1979 static void
1980 test_raid_json_dump_info(void)
1981 {
1982 	struct rpc_bdev_raid_create req;
1983 	struct rpc_bdev_raid_delete destroy_req;
1984 	struct raid_bdev *pbdev;
1985 
1986 	set_globals();
1987 	CU_ASSERT(raid_bdev_init() == 0);
1988 
1989 	verify_raid_bdev_present("raid1", false);
1990 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1991 	rpc_bdev_raid_create(NULL, NULL);
1992 	CU_ASSERT(g_rpc_err == 0);
1993 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1994 
1995 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1996 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1997 			break;
1998 		}
1999 	}
2000 	CU_ASSERT(pbdev != NULL);
2001 
2002 	CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
2003 
2004 	free_test_req(&req);
2005 
2006 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2007 	rpc_bdev_raid_delete(NULL, NULL);
2008 	CU_ASSERT(g_rpc_err == 0);
2009 	verify_raid_bdev_present("raid1", false);
2010 
2011 	raid_bdev_exit();
2012 	base_bdevs_cleanup();
2013 	reset_globals();
2014 }
2015 
2016 static void
2017 test_context_size(void)
2018 {
2019 	CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
2020 }
2021 
2022 static void
2023 test_raid_level_conversions(void)
2024 {
2025 	const char *raid_str;
2026 
2027 	CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL);
2028 	CU_ASSERT(raid_bdev_str_to_level("0") == RAID0);
2029 	CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0);
2030 	CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0);
2031 
2032 	raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
2033 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
2034 	raid_str = raid_bdev_level_to_str(1234);
2035 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
2036 	raid_str = raid_bdev_level_to_str(RAID0);
2037 	CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
2038 }
2039 
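/* Same flow as test_create_raid, but with the superblock flag (the last
 * argument of create_raid_bdev_create_req()) enabled.
 */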
2040 static void
2041 test_create_raid_superblock(void)
2042 {
2043 	struct rpc_bdev_raid_create req;
2044 	struct rpc_bdev_raid_delete delete_req;
2045 
2046 	set_globals();
2047 	CU_ASSERT(raid_bdev_init() == 0);
2048 
2049 	verify_raid_bdev_present("raid1", false);
2050 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true);
2051 	rpc_bdev_raid_create(NULL, NULL);
2052 	CU_ASSERT(g_rpc_err == 0);
2053 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2054 	free_test_req(&req);
2055 
2056 	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
2057 	rpc_bdev_raid_delete(NULL, NULL);
2058 	CU_ASSERT(g_rpc_err == 0);
2059 	raid_bdev_exit();
2060 	base_bdevs_cleanup();
2061 	reset_globals();
2062 }
2063 
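/* Minimal process hooks for test_raid_process: count the blocks handed to each
 * process request and complete it asynchronously via a thread message.
 */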
2064 static void
2065 complete_process_request(void *ctx)
2066 {
2067 	struct raid_bdev_process_request *process_req = ctx;
2068 
2069 	raid_bdev_process_request_complete(process_req, 0);
2070 }
2071 
2072 static int
2073 submit_process_request(struct raid_bdev_process_request *process_req,
2074 		       struct raid_bdev_io_channel *raid_ch)
2075 {
2076 	struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(raid_ch));
2077 
2078 	*(uint64_t *)raid_bdev->module_private += process_req->num_blocks;
2079 
2080 	spdk_thread_send_msg(spdk_get_thread(), complete_process_request, process_req);
2081 
2082 	return process_req->num_blocks;
2083 }
2084 
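/* Start a rebuild of the first base bdev and drive the process thread until it
 * finishes, verifying that every block of the raid bdev was processed.
 */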
2085 static void
2086 test_raid_process(void)
2087 {
2088 	struct rpc_bdev_raid_create req;
2089 	struct rpc_bdev_raid_delete destroy_req;
2090 	struct raid_bdev *pbdev;
2091 	struct spdk_bdev *base_bdev;
2092 	struct spdk_thread *process_thread;
2093 	uint64_t num_blocks_processed = 0;
2094 
2095 	set_globals();
2096 	CU_ASSERT(raid_bdev_init() == 0);
2097 
2098 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2099 	verify_raid_bdev_present("raid1", false);
2100 	TAILQ_FOREACH(base_bdev, &g_bdev_list, internal.link) {
2101 		base_bdev->blockcnt = 128;
2102 	}
2103 	rpc_bdev_raid_create(NULL, NULL);
2104 	CU_ASSERT(g_rpc_err == 0);
2105 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2106 	free_test_req(&req);
2107 
2108 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2109 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2110 			break;
2111 		}
2112 	}
2113 	CU_ASSERT(pbdev != NULL);
2114 
2115 	pbdev->module->submit_process_request = submit_process_request;
2116 	pbdev->module_private = &num_blocks_processed;
2117 
2118 	CU_ASSERT(raid_bdev_start_rebuild(&pbdev->base_bdev_info[0]) == 0);
2119 	poll_threads();
2120 
2121 	SPDK_CU_ASSERT_FATAL(pbdev->process != NULL);
2122 
2123 	process_thread = spdk_thread_get_by_id(spdk_thread_get_id(spdk_get_thread()) + 1);
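	/* The process is assumed to run on the spdk_thread created immediately
	 * after the current one, hence the lookup by thread id + 1.
	 */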
2124 
2125 	while (spdk_thread_poll(process_thread, 0, 0) > 0) {
2126 		poll_threads();
2127 	}
2128 
2129 	CU_ASSERT(pbdev->process == NULL);
2130 	CU_ASSERT(num_blocks_processed == pbdev->bdev.blockcnt);
2131 
2132 	poll_threads();
2133 
2134 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2135 	rpc_bdev_raid_delete(NULL, NULL);
2136 	CU_ASSERT(g_rpc_err == 0);
2137 	verify_raid_bdev_present("raid1", false);
2138 
2139 	raid_bdev_exit();
2140 	base_bdevs_cleanup();
2141 	reset_globals();
2142 }
2143 
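/* With a process active, a request crossing the process offset is split: the
 * part at and above the offset is submitted first and, once it completes, the
 * remainder below it. Verify the iovec and md_buf adjustments for splits at
 * various positions within single- and multi-iovec requests.
 */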
2144 static void
2145 test_raid_io_split(void)
2146 {
2147 	struct rpc_bdev_raid_create req;
2148 	struct rpc_bdev_raid_delete destroy_req;
2149 	struct raid_bdev *pbdev;
2150 	struct spdk_io_channel *ch;
2151 	struct raid_bdev_io_channel *raid_ch;
2152 	struct spdk_bdev_io *bdev_io;
2153 	struct raid_bdev_io *raid_io;
2154 	uint64_t split_offset;
2155 	struct iovec iovs_orig[4];
2156 	struct raid_bdev_process process = { };
2157 
2158 	set_globals();
2159 	CU_ASSERT(raid_bdev_init() == 0);
2160 
2161 	verify_raid_bdev_present("raid1", false);
2162 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2163 	rpc_bdev_raid_create(NULL, NULL);
2164 	CU_ASSERT(g_rpc_err == 0);
2165 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2166 
2167 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2168 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2169 			break;
2170 		}
2171 	}
2172 	CU_ASSERT(pbdev != NULL);
2173 	pbdev->bdev.md_len = 8;
2174 
2175 	process.raid_bdev = pbdev;
2176 	process.target = &pbdev->base_bdev_info[0];
2177 	pbdev->process = &process;
2178 	ch = spdk_get_io_channel(pbdev);
2179 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2180 	raid_ch = spdk_io_channel_get_ctx(ch);
2181 	g_bdev_io_defer_completion = true;
2182 
2183 	/* test split of bdev_io with 1 iovec */
2184 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2185 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2186 	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
2187 	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE);
2188 	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
2189 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2190 	bdev_io->u.bdev.md_buf = (void *)0x1000000;
2191 	g_io_output_index = 0;
2192 
2193 	split_offset = 1;
2194 	raid_ch->process.offset = split_offset;
2195 	raid_bdev_submit_request(ch, bdev_io);
2196 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2197 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2198 	CU_ASSERT(raid_io->iovcnt == 1);
2199 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2200 	CU_ASSERT(raid_io->iovs == raid_io->split.iov);
2201 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base + split_offset * g_block_len);
2202 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len - split_offset * g_block_len);
2203 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2204 	complete_deferred_ios();
2205 	CU_ASSERT(raid_io->num_blocks == split_offset);
2206 	CU_ASSERT(raid_io->offset_blocks == 0);
2207 	CU_ASSERT(raid_io->iovcnt == 1);
2208 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
2209 	CU_ASSERT(raid_io->iovs[0].iov_len == split_offset * g_block_len);
2210 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2211 	complete_deferred_ios();
2212 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2213 	CU_ASSERT(raid_io->offset_blocks == 0);
2214 	CU_ASSERT(raid_io->iovcnt == 1);
2215 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
2216 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len);
2217 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2218 
2219 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2220 	CU_ASSERT(g_io_output_index == 2);
2221 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2222 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2223 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2224 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2225 	bdev_io_cleanup(bdev_io);
2226 
2227 	/* test split of bdev_io with 4 iovecs */
2228 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2229 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2230 	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
2231 	_bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE,
2232 			    4, g_strip_size / 4 * g_block_len);
2233 	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
2234 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2235 	bdev_io->u.bdev.md_buf = (void *)0x1000000;
2236 	g_io_output_index = 0;
2237 
2238 	split_offset = 1; /* split at the first iovec */
2239 	raid_ch->process.offset = split_offset;
2240 	raid_bdev_submit_request(ch, bdev_io);
2241 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2242 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2243 	CU_ASSERT(raid_io->iovcnt == 4);
2244 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[0]);
2245 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[0]);
2246 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base + g_block_len);
2247 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[0].iov_len - g_block_len);
2248 	CU_ASSERT(memcmp(raid_io->iovs + 1, iovs_orig + 1, sizeof(*iovs_orig) * 3) == 0);
2249 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2250 	complete_deferred_ios();
2251 	CU_ASSERT(raid_io->num_blocks == split_offset);
2252 	CU_ASSERT(raid_io->offset_blocks == 0);
2253 	CU_ASSERT(raid_io->iovcnt == 1);
2254 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2255 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base);
2256 	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
2257 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2258 	complete_deferred_ios();
2259 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2260 	CU_ASSERT(raid_io->offset_blocks == 0);
2261 	CU_ASSERT(raid_io->iovcnt == 4);
2262 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2263 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2264 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2265 
2266 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2267 	CU_ASSERT(g_io_output_index == 2);
2268 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2269 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2270 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2271 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2272 
2273 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2274 	g_io_output_index = 0;
2275 
2276 	split_offset = g_strip_size / 2; /* split exactly between second and third iovec */
2277 	raid_ch->process.offset = split_offset;
2278 	raid_bdev_submit_request(ch, bdev_io);
2279 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2280 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2281 	CU_ASSERT(raid_io->iovcnt == 2);
2282 	CU_ASSERT(raid_io->split.iov == NULL);
2283 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
2284 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig + 2, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2285 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2286 	complete_deferred_ios();
2287 	CU_ASSERT(raid_io->num_blocks == split_offset);
2288 	CU_ASSERT(raid_io->offset_blocks == 0);
2289 	CU_ASSERT(raid_io->iovcnt == 2);
2290 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2291 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2292 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2293 	complete_deferred_ios();
2294 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2295 	CU_ASSERT(raid_io->offset_blocks == 0);
2296 	CU_ASSERT(raid_io->iovcnt == 4);
2297 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2298 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2299 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2300 
2301 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2302 	CU_ASSERT(g_io_output_index == 2);
2303 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2304 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2305 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2306 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2307 
2308 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2309 	g_io_output_index = 0;
2310 
2311 	split_offset = g_strip_size / 2 + 1; /* split at the third iovec */
2312 	raid_ch->process.offset = split_offset;
2313 	raid_bdev_submit_request(ch, bdev_io);
2314 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2315 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2316 	CU_ASSERT(raid_io->iovcnt == 2);
2317 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[2]);
2318 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
2319 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[2].iov_base + g_block_len);
2320 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[2].iov_len - g_block_len);
2321 	CU_ASSERT(raid_io->iovs[1].iov_base == iovs_orig[3].iov_base);
2322 	CU_ASSERT(raid_io->iovs[1].iov_len == iovs_orig[3].iov_len);
2323 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2324 	complete_deferred_ios();
2325 	CU_ASSERT(raid_io->num_blocks == split_offset);
2326 	CU_ASSERT(raid_io->offset_blocks == 0);
2327 	CU_ASSERT(raid_io->iovcnt == 3);
2328 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2329 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 2) == 0);
2330 	CU_ASSERT(raid_io->iovs[2].iov_base == iovs_orig[2].iov_base);
2331 	CU_ASSERT(raid_io->iovs[2].iov_len == g_block_len);
2332 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2333 	complete_deferred_ios();
2334 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2335 	CU_ASSERT(raid_io->offset_blocks == 0);
2336 	CU_ASSERT(raid_io->iovcnt == 4);
2337 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2338 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2339 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2340 
2341 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2342 	CU_ASSERT(g_io_output_index == 2);
2343 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2344 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2345 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2346 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2347 
2348 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2349 	g_io_output_index = 0;
2350 
2351 	split_offset = g_strip_size - 1; /* split at the last iovec */
2352 	raid_ch->process.offset = split_offset;
2353 	raid_bdev_submit_request(ch, bdev_io);
2354 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2355 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2356 	CU_ASSERT(raid_io->iovcnt == 1);
2357 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[3]);
2358 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[3]);
2359 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[3].iov_base + iovs_orig[3].iov_len - g_block_len);
2360 	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
2361 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2362 	complete_deferred_ios();
2363 	CU_ASSERT(raid_io->num_blocks == split_offset);
2364 	CU_ASSERT(raid_io->offset_blocks == 0);
2365 	CU_ASSERT(raid_io->iovcnt == 4);
2366 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2367 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 3) == 0);
2368 	CU_ASSERT(raid_io->iovs[3].iov_base == iovs_orig[3].iov_base);
2369 	CU_ASSERT(raid_io->iovs[3].iov_len == iovs_orig[3].iov_len - g_block_len);
2370 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2371 	complete_deferred_ios();
2372 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2373 	CU_ASSERT(raid_io->offset_blocks == 0);
2374 	CU_ASSERT(raid_io->iovcnt == 4);
2375 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2376 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2377 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2378 
2379 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2380 	CU_ASSERT(g_io_output_index == 2);
2381 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2382 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2383 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2384 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2385 	bdev_io_cleanup(bdev_io);
2386 
2387 	spdk_put_io_channel(ch);
2388 	free_test_req(&req);
2389 	pbdev->process = NULL;
2390 
2391 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2392 	rpc_bdev_raid_delete(NULL, NULL);
2393 	CU_ASSERT(g_rpc_err == 0);
2394 	verify_raid_bdev_present("raid1", false);
2395 
2396 	raid_bdev_exit();
2397 	base_bdevs_cleanup();
2398 	reset_globals();
2399 }
2400 
2401 static int
2402 test_bdev_ioch_create(void *io_device, void *ctx_buf)
2403 {
2404 	return 0;
2405 }
2406 
2407 static void
2408 test_bdev_ioch_destroy(void *io_device, void *ctx_buf)
2409 {
2410 }
2411 
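/* Run the suite on a single SPDK thread, with a dummy io_device registered to
 * back the base bdev channels (g_bdev_ch_io_device).
 */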
2412 int
2413 main(int argc, char **argv)
2414 {
2415 	CU_pSuite       suite = NULL;
2416 	unsigned int    num_failures;
2417 
2418 	CU_initialize_registry();
2419 
2420 	suite = CU_add_suite("raid", NULL, NULL);
2421 
2422 	CU_ADD_TEST(suite, test_create_raid);
2423 	CU_ADD_TEST(suite, test_create_raid_superblock);
2424 	CU_ADD_TEST(suite, test_delete_raid);
2425 	CU_ADD_TEST(suite, test_create_raid_invalid_args);
2426 	CU_ADD_TEST(suite, test_delete_raid_invalid_args);
2427 	CU_ADD_TEST(suite, test_io_channel);
2428 	CU_ADD_TEST(suite, test_reset_io);
2429 	CU_ADD_TEST(suite, test_write_io);
2430 	CU_ADD_TEST(suite, test_read_io);
2431 	CU_ADD_TEST(suite, test_unmap_io);
2432 	CU_ADD_TEST(suite, test_io_failure);
2433 	CU_ADD_TEST(suite, test_multi_raid_no_io);
2434 	CU_ADD_TEST(suite, test_multi_raid_with_io);
2435 	CU_ADD_TEST(suite, test_io_type_supported);
2436 	CU_ADD_TEST(suite, test_raid_json_dump_info);
2437 	CU_ADD_TEST(suite, test_context_size);
2438 	CU_ADD_TEST(suite, test_raid_level_conversions);
2439 	CU_ADD_TEST(suite, test_raid_process);
2440 	CU_ADD_TEST(suite, test_raid_io_split);
2441 
2442 	allocate_threads(1);
2443 	set_thread(0);
2444 	spdk_io_device_register(&g_bdev_ch_io_device, test_bdev_ioch_create, test_bdev_ioch_destroy, 0,
2445 				NULL);
2446 
2447 	set_test_opts();
2448 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2449 	CU_cleanup_registry();
2450 
2451 	spdk_io_device_unregister(&g_bdev_ch_io_device, NULL);
2452 	free_threads();
2453 
2454 	return num_failures;
2455 }
2456