/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

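/*
 * The raid module sources are included directly (rather than linked) so that
 * the tests can reach static functions and override selected dependencies
 * with the stubs defined below.
 */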
#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "bdev/raid/bdev_raid.c"
#include "bdev/raid/bdev_raid_rpc.c"
#include "bdev/raid/raid0.c"
#include "common/lib/ut_multithread.c"

#define MAX_BASE_DRIVES 32
#define MAX_RAIDS 2
#define INVALID_IO_SUBMIT 0xFFFF
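/*
 * raid_bdev_io_generate_by_strips() emits up to 3 * 3 * 3 ranges per strip
 * count (start offset x end offset x starting base bdev), so size the range
 * array for the strip counts exercised by raid_bdev_io_generate().
 */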
#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)

struct spdk_bdev_channel {
	struct spdk_io_channel *channel;
};

struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};

/* Data structure to capture the output of IO for verification */
struct io_output {
	struct spdk_bdev_desc       *desc;
	struct spdk_io_channel      *ch;
	uint64_t                    offset_blocks;
	uint64_t                    num_blocks;
	spdk_bdev_io_completion_cb  cb;
	void                        *cb_arg;
	enum spdk_bdev_io_type      iotype;
};

struct raid_io_ranges {
	uint64_t lba;
	uint64_t nblocks;
};

/* Globals */
int g_bdev_io_submit_status;
struct io_output *g_io_output = NULL;
uint32_t g_io_output_index;
uint32_t g_io_comp_status;
bool g_child_io_status_flag;
void *g_rpc_req;
uint32_t g_rpc_req_size;
TAILQ_HEAD(bdev, spdk_bdev);
struct bdev g_bdev_list;
TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
struct waitq g_io_waitq;
uint32_t g_block_len;
uint32_t g_strip_size;
uint32_t g_max_io_size;
uint8_t g_max_base_drives;
uint8_t g_max_raids;
uint8_t g_ignore_io_output;
uint8_t g_rpc_err;
char *g_get_raids_output[MAX_RAIDS];
uint32_t g_get_raids_count;
uint8_t g_json_decode_obj_err;
uint8_t g_json_decode_obj_create;
uint8_t g_config_level_create = 0;
uint8_t g_test_multi_raids;
struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
uint32_t g_io_range_idx;
uint64_t g_lba_offset;
uint64_t g_bdev_ch_io_device;
bool g_bdev_io_defer_completion;
TAILQ_HEAD(, spdk_bdev_io) g_deferred_ios = TAILQ_HEAD_INITIALIZER(g_deferred_ios);

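/*
 * No-op stubs for the bdev and JSON/RPC symbols that the included sources
 * reference but the tests do not need to exercise.
 */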
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp),
	    NULL);
DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
		uint32_t state_mask));
DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
					struct spdk_json_write_ctx *w));
DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request,
		bool value));
DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values,
		spdk_json_decode_fn decode_func,
		void *out, size_t max_size, size_t *out_size, size_t stride), 0);
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
		uint64_t val), 0);
DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev,
		struct spdk_memory_domain **domains, int array_size), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev");
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_is_md_separate, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type, (const struct spdk_bdev *bdev),
	    SPDK_DIF_DISABLE);
DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0);
DEFINE_STUB_V(raid_bdev_init_superblock, (struct raid_bdev *raid_bdev));

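/*
 * Superblock helpers: loading always reports -EINVAL, i.e. the base bdev is
 * treated as having no valid superblock; writing completes immediately.
 */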
int
raid_bdev_load_base_bdev_superblock(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				    raid_bdev_load_sb_cb cb, void *cb_ctx)
{
	cb(NULL, -EINVAL, cb_ctx);

	return 0;
}

void
raid_bdev_write_superblock(struct raid_bdev *raid_bdev, raid_bdev_write_sb_cb cb, void *cb_ctx)
{
	cb(0, raid_bdev, cb_ctx);
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
{
	return spdk_get_io_channel(&g_bdev_ch_io_device);
}

static void
set_test_opts(void)
{
	g_max_base_drives = MAX_BASE_DRIVES;
	g_max_raids = MAX_RAIDS;
	g_block_len = 4096;
	g_strip_size = 64;
	g_max_io_size = 1024;

	printf("Test Options\n");
	printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
	       "g_max_raids = %u\n",
	       g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids);
}

/* Set globals before every test run */
static void
set_globals(void)
{
	uint32_t max_splits;

	g_bdev_io_submit_status = 0;
	if (g_max_io_size < g_strip_size) {
		max_splits = 2;
	} else {
		max_splits = (g_max_io_size / g_strip_size) + 1;
	}
	if (max_splits < g_max_base_drives) {
		max_splits = g_max_base_drives;
	}

	g_io_output = calloc(max_splits, sizeof(struct io_output));
	SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
	g_io_output_index = 0;
	memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
	g_get_raids_count = 0;
	g_io_comp_status = 0;
	g_ignore_io_output = 0;
	g_config_level_create = 0;
	g_rpc_err = 0;
	g_test_multi_raids = 0;
	g_child_io_status_flag = true;
	TAILQ_INIT(&g_bdev_list);
	TAILQ_INIT(&g_io_waitq);
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
	g_json_decode_obj_err = 0;
	g_json_decode_obj_create = 0;
	g_lba_offset = 0;
	g_bdev_io_defer_completion = false;
}

static void
base_bdevs_cleanup(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev *bdev_next;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
			free(bdev->name);
			TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
			free(bdev);
		}
	}
}

static void
check_and_remove_raid_bdev(struct raid_bdev *raid_bdev)
{
	struct raid_base_bdev_info *base_info;

	assert(raid_bdev != NULL);
	assert(raid_bdev->base_bdev_info != NULL);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		if (base_info->desc) {
			raid_bdev_free_base_bdev_resource(base_info);
		}
	}
	assert(raid_bdev->num_base_bdevs_discovered == 0);
	raid_bdev_cleanup_and_free(raid_bdev);
}

/* Reset globals */
static void
reset_globals(void)
{
	if (g_io_output) {
		free(g_io_output);
		g_io_output = NULL;
	}
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
		     uint64_t len)
{
	cb(bdev_io->internal.ch->channel, bdev_io, true);
}

/* Store the IO completion status in a global variable so tests can verify it */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
}

static void
set_io_output(struct io_output *output,
	      struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	      uint64_t offset_blocks, uint64_t num_blocks,
	      spdk_bdev_io_completion_cb cb, void *cb_arg,
	      enum spdk_bdev_io_type iotype)
{
	output->desc = desc;
	output->ch = ch;
	output->offset_blocks = offset_blocks;
	output->num_blocks = num_blocks;
	output->cb = cb;
	output->cb_arg = cb_arg;
	output->iotype = iotype;
}

static void
child_io_complete(struct spdk_bdev_io *child_io, spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_bdev_io_defer_completion) {
		child_io->internal.cb = cb;
		child_io->internal.caller_ctx = cb_arg;
		TAILQ_INSERT_TAIL(&g_deferred_ios, child_io, internal.link);
	} else {
		cb(child_io, g_child_io_status_flag, cb_arg);
	}
}

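/*
 * Complete the child IOs queued by child_io_complete() when
 * g_bdev_io_defer_completion is set, letting tests exercise asynchronous
 * completion ordering.
 */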
static void
complete_deferred_ios(void)
{
	struct spdk_bdev_io *child_io, *tmp;

	TAILQ_FOREACH_SAFE(child_io, &g_deferred_ios, internal.link, tmp) {
		TAILQ_REMOVE(&g_deferred_ios, child_io, internal.link);
		child_io->internal.cb(child_io, g_child_io_status_flag, child_io->internal.caller_ctx);
	}
}

/* Cache the split IOs for later verification */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_max_io_size < g_strip_size) {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
	} else {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
	}
	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_WRITE);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt,
			    uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_UNMAP);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

void
spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
{
	CU_ASSERT(bdeverrno == 0);
	SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL);
	bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int ret;

	bdev->internal.unregister_cb = cb_fn;
	bdev->internal.unregister_ctx = cb_arg;

	ret = bdev->fn_table->destruct(bdev->ctxt);
	CU_ASSERT(ret == 1);

	poll_threads();
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
		   void *event_ctx, struct spdk_bdev_desc **_desc)
{
	struct spdk_bdev *bdev;

	bdev = spdk_bdev_get_by_name(bdev_name);
	if (bdev == NULL) {
		return -ENODEV;
	}

	*_desc = (void *)bdev;
	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (void *)desc;
}

int
spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "strip_size_kb") == 0) {
			CU_ASSERT(req->strip_size_kb == val);
		} else if (strcmp(name, "blocklen_shift") == 0) {
			CU_ASSERT(spdk_u32log2(g_block_len) == val);
		} else if (strcmp(name, "num_base_bdevs") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		} else if (strcmp(name, "state") == 0) {
			CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
		} else if (strcmp(name, "destruct_called") == 0) {
			CU_ASSERT(val == 0);
		} else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		}
	}
	return 0;
}

int
spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
{
	if (g_test_multi_raids) {
		if (strcmp(name, "name") == 0) {
			g_get_raids_output[g_get_raids_count] = strdup(val);
			SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
			g_get_raids_count++;
		}
	} else {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "raid_level") == 0) {
			CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0);
		}
	}
	return 0;
}

int
spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "superblock") == 0) {
			CU_ASSERT(val == req->superblock_enabled);
		}
	}
	return 0;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io) {
		free(bdev_io);
	}
}

/* Cache the split IOs for later verification */
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_READ);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt,
			   uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}

void
spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
{
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module != NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
	bdev->internal.claim.v1.module = NULL;
}

int
spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_bdev_module *module)
{
	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
		CU_ASSERT(bdev->internal.claim.v1.module != NULL);
		return -1;
	}
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
	bdev->internal.claim.v1.module = module;
	return 0;
}

int
spdk_json_decode_object(const struct spdk_json_val *values,
			const struct spdk_json_object_decoder *decoders, size_t num_decoders,
			void *out)
{
	struct rpc_bdev_raid_create *req, *_out;
	size_t i;

	if (g_json_decode_obj_err) {
		return -1;
	} else if (g_json_decode_obj_create) {
		req = g_rpc_req;
		_out = out;

		_out->name = strdup(req->name);
		SPDK_CU_ASSERT_FATAL(_out->name != NULL);
		_out->strip_size_kb = req->strip_size_kb;
		_out->level = req->level;
		_out->superblock_enabled = req->superblock_enabled;
		_out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
		for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
			_out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
			SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]);
		}
	} else {
		memcpy(out, g_rpc_req, g_rpc_req_size);
	}

	return 0;
}

struct spdk_json_write_ctx *
spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
{
	return (void *)1;
}

void
spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
				 int error_code, const char *msg)
{
	g_rpc_err = 1;
}

void
spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
				     int error_code, const char *fmt, ...)
{
	g_rpc_err = 1;
}

struct spdk_bdev *
spdk_bdev_get_by_name(const char *bdev_name)
{
	struct spdk_bdev *bdev;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
			if (strcmp(bdev_name, bdev->name) == 0) {
				return bdev;
			}
		}
	}

	return NULL;
}

int
spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		    spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			uint64_t offset, uint64_t length,
			spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			  uint64_t offset, uint64_t length,
			  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

static void
bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->u.bdev.iovs) {
		int i;

		for (i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
			free(bdev_io->u.bdev.iovs[i].iov_base);
		}
		free(bdev_io->u.bdev.iovs);
	}
	free(bdev_io);
}

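/*
 * Initialize a bdev_io for submission to the raid bdev, allocating an iovec
 * payload of iovcnt buffers whose total size must equal blocks * g_block_len.
 */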
static void
_bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch,
		    struct spdk_bdev *bdev, uint64_t lba, uint64_t blocks, int16_t iotype,
		    int iovcnt, size_t iov_len)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
	int i;

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;
	bdev_io->internal.ch = channel;
	bdev_io->u.bdev.iovcnt = iovcnt;

	if (iovcnt == 0) {
		bdev_io->u.bdev.iovs = NULL;
		return;
	}

	SPDK_CU_ASSERT_FATAL(iov_len * iovcnt == blocks * g_block_len);

	bdev_io->u.bdev.iovs = calloc(iovcnt, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);

	for (i = 0; i < iovcnt; i++) {
		struct iovec *iov = &bdev_io->u.bdev.iovs[i];

		iov->iov_base = calloc(1, iov_len);
		SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL);
		iov->iov_len = iov_len;
	}
}

static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	int iovcnt;
	size_t iov_len;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
		iovcnt = 0;
		iov_len = 0;
	} else {
		iovcnt = 1;
		iov_len = blocks * g_block_len;
	}

	_bdev_io_initialize(bdev_io, ch, bdev, lba, blocks, iotype, iovcnt, iov_len);
}

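/*
 * Verify that a reset was fanned out to every base bdev: one child IO per
 * base drive, each on the matching channel and descriptor.
 */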
static void
verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
		struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint8_t index = 0;
	struct io_output *output;

	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT);
	SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);

	CU_ASSERT(g_io_output_index == num_base_drives);
	for (index = 0; index < g_io_output_index; index++) {
		output = &g_io_output[index];
		CU_ASSERT(ch_ctx->base_channel[index] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

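/*
 * Verify that a read/write was split per the RAID0 mapping: each strip maps
 * to base bdev (strip % num_base_drives) at strip index
 * (strip / num_base_drives), and each child IO's LBA, block count, channel
 * and descriptor must match.
 */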
static void
verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
	  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint32_t splits_reqd = (end_strip - start_strip + 1);
	uint32_t strip;
	uint64_t pd_strip;
	uint8_t pd_idx;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint32_t index = 0;
	struct io_output *output;

	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);

	CU_ASSERT(splits_reqd == g_io_output_index);
	for (strip = start_strip; strip <= end_strip; strip++, index++) {
		pd_strip = strip / num_base_drives;
		pd_idx = strip % num_base_drives;
		if (strip == start_strip) {
			offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
			pd_lba = (pd_strip << strip_shift) + offset_in_strip;
			if (strip == end_strip) {
				pd_blocks = bdev_io->u.bdev.num_blocks;
			} else {
				pd_blocks = g_strip_size - offset_in_strip;
			}
		} else if (strip == end_strip) {
			pd_lba = pd_strip << strip_shift;
			pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
				     (g_strip_size - 1)) + 1;
		} else {
			pd_lba = pd_strip << raid_bdev->strip_size_shift;
			pd_blocks = raid_bdev->strip_size;
		}
		output = &g_io_output[index];
		CU_ASSERT(pd_lba == output->offset_blocks);
		CU_ASSERT(pd_blocks == output->num_blocks);
		CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

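/*
 * Like verify_io(), but for IO types without a payload (e.g. unmap/flush),
 * where the raid module submits at most one child IO per base bdev rather
 * than one per strip.
 */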
static void
verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
			  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev,
			  uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size;
	uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) %
				       g_strip_size;
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint8_t n_disks_involved;
	uint64_t start_strip_disk_idx;
	uint64_t end_strip_disk_idx;
	uint64_t nblocks_in_start_disk;
	uint64_t offset_in_start_disk;
	uint8_t disk_idx;
	uint64_t base_io_idx;
	uint64_t sum_nblocks = 0;
	struct io_output *output;

	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE);

	n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
	CU_ASSERT(n_disks_involved == g_io_output_index);

	start_strip_disk_idx = start_strip % num_base_drives;
	end_strip_disk_idx = end_strip % num_base_drives;

	offset_in_start_disk = g_io_output[0].offset_blocks;
	nblocks_in_start_disk = g_io_output[0].num_blocks;

	for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
	     base_io_idx++, disk_idx++) {
		uint64_t start_offset_in_disk;
		uint64_t end_offset_in_disk;

		output = &g_io_output[base_io_idx];

		/* round disk_idx */
		if (disk_idx >= num_base_drives) {
			disk_idx %= num_base_drives;
		}

		/* start_offset_in_disk alignment check:
		 * The first base IO has the same start_offset_in_strip as the whole
		 * raid IO; every other base IO must start on a strip boundary, i.e.
		 * its start_offset_in_strip is 0.
		 */
		start_offset_in_disk = output->offset_blocks;
		if (base_io_idx == 0) {
			CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
		} else {
			CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
		}

		/* end_offset_in_disk alignment check:
		 * The base IO on the disk holding end_strip has the same
		 * end_offset_in_strip as the whole raid IO; every other base IO must
		 * end on a strip boundary.
		 */
		end_offset_in_disk = output->offset_blocks + output->num_blocks - 1;
		if (disk_idx == end_strip_disk_idx) {
			CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
		} else {
			CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
		}

		/* start_offset_in_disk compared with the start disk:
		 * 1. For a disk_idx larger than start_strip_disk_idx, its
		 *    start_offset_in_disk must not be larger than offset_in_start_disk,
		 *    and the gap must be less than the strip size.
		 * 2. For a disk_idx less than start_strip_disk_idx, its
		 *    start_offset_in_disk must be larger than offset_in_start_disk,
		 *    and the gap must not exceed the strip size.
		 */
		if (disk_idx > start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
			CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
		} else if (disk_idx < start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
			CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size);
		}

		/* nblocks compared with the start disk:
		 * the difference between them must be within one strip size.
		 */
		if (output->num_blocks <= nblocks_in_start_disk) {
			CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size);
		} else {
			CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size);
		}

		sum_nblocks += output->num_blocks;

		CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}

	/* The sum of the base IOs' nblocks must equal the raid bdev_io's num_blocks */
	CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks);

	CU_ASSERT(g_io_comp_status == io_status);
}

static void
verify_raid_bdev_present(const char *name, bool presence)
{
	struct raid_bdev *pbdev;
	bool   pbdev_found;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, name) == 0) {
			pbdev_found = true;
			break;
		}
	}
	if (presence == true) {
		CU_ASSERT(pbdev_found == true);
	} else {
		CU_ASSERT(pbdev_found == false);
	}
}

static void
verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state)
{
	struct raid_bdev *pbdev;
	struct raid_base_bdev_info *base_info;
	struct spdk_bdev *bdev = NULL;
	bool   pbdev_found;
	uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, r->name) == 0) {
			pbdev_found = true;
			if (presence == false) {
				break;
			}
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len));
			CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) /
					g_block_len)));
			CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len));
			CU_ASSERT((uint32_t)pbdev->state == raid_state);
			CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->level == r->level);
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) {
				CU_ASSERT(base_info->desc != NULL);
				bdev = spdk_bdev_desc_get_bdev(base_info->desc);
				CU_ASSERT(bdev != NULL);
				CU_ASSERT(base_info->remove_scheduled == false);
				CU_ASSERT((pbdev->sb != NULL && base_info->data_offset != 0) ||
					  (pbdev->sb == NULL && base_info->data_offset == 0));
				CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt);

				if (bdev && base_info->data_size < min_blockcnt) {
					min_blockcnt = base_info->data_size;
				}
			}
			CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) *
				    (r->strip_size_kb * 1024 / g_block_len)) *
				   r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
			CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0);
			CU_ASSERT(pbdev->bdev.write_cache == 0);
			CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
			if (pbdev->num_base_bdevs > 1) {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
			} else {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
			}
			CU_ASSERT(pbdev->bdev.ctxt == pbdev);
			CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
			CU_ASSERT(pbdev->bdev.module == &g_raid_if);
			break;
		}
	}
	if (presence == true) {
		CU_ASSERT(pbdev_found == true);
	} else {
		CU_ASSERT(pbdev_found == false);
	}
}

static void
verify_get_raids(struct rpc_bdev_raid_create *construct_req,
		 uint8_t g_max_raids,
		 char **g_get_raids_output, uint32_t g_get_raids_count)
{
	uint8_t i, j;
	bool found;

	CU_ASSERT(g_max_raids == g_get_raids_count);
	if (g_max_raids == g_get_raids_count) {
		for (i = 0; i < g_max_raids; i++) {
			found = false;
			for (j = 0; j < g_max_raids; j++) {
				if (construct_req[i].name &&
				    strcmp(construct_req[i].name, g_get_raids_output[j]) == 0) {
					found = true;
					break;
				}
			}
			CU_ASSERT(found == true);
		}
	}
}

static void
create_base_bdevs(uint32_t bbdev_start_idx)
{
	uint8_t i;
	struct spdk_bdev *base_bdev;
	char name[16];

	for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
		base_bdev = calloc(1, sizeof(struct spdk_bdev));
		SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
		base_bdev->name = strdup(name);
		spdk_uuid_generate(&base_bdev->uuid);
		SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
		base_bdev->blocklen = g_block_len;
		base_bdev->blockcnt = BLOCK_CNT;
		TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
	}
}

static void
create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name,
		uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled)
{
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = bbdev_start_idx;

	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);
	r->strip_size_kb = (g_strip_size * g_block_len) / 1024;
	r->level = RAID0;
	r->superblock_enabled = superblock_enabled;
	r->base_bdevs.num_base_bdevs = g_max_base_drives;
	for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
		r->base_bdevs.base_bdevs[i] = strdup(name);
		SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
	}
	if (create_base_bdev == true) {
		create_base_bdevs(bbdev_start_idx);
	}
	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
}

static void
create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name,
			    uint8_t bbdev_start_idx, bool create_base_bdev,
			    uint8_t json_decode_obj_err, bool superblock_enabled)
{
	create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock_enabled);

	g_rpc_err = 0;
	g_json_decode_obj_create = 1;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 0;
}

static void
free_test_req(struct rpc_bdev_raid_create *r)
{
	uint8_t i;

	free(r->name);
	for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
		free(r->base_bdevs.base_bdevs[i]);
	}
}

static void
create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name,
			    uint8_t json_decode_obj_err)
{
	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 0;
}

static void
create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category,
		     uint8_t json_decode_obj_err)
{
	r->category = strdup(category);
	SPDK_CU_ASSERT_FATAL(r->category != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 1;
	g_get_raids_count = 0;
}

static void
test_create_raid(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_create_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *raid_bdev;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	req.level = INVALID_RAID_LEVEL;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	req.strip_size_kb = 1231;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	raid_bdev = raid_bdev_find_by_name("raid2");
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	check_and_remove_raid_bdev(raid_bdev);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete destroy_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 1);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(destroy_req.name);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_io_channel(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	free_test_req(&req);

	spdk_put_io_channel(ch);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_write_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	uint8_t i;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba = 0;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* test 2 IO sizes based on global strip size set earlier */
	for (i = 0; i < 2; i++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << i;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_read_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	uint8_t i;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* test 2 IO sizes based on global strip size set earlier */
	lba = 0;
	for (i = 0; i < 2; i++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << i;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

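/*
 * Generate IO ranges that are n_strips strips long, covering every
 * combination of 3 start offsets within a strip, 3 end offsets within a
 * strip, and 3 starting base bdevs.
 */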
static void
raid_bdev_io_generate_by_strips(uint64_t n_strips)
{
	uint64_t lba;
	uint64_t nblocks;
	uint64_t start_offset;
	uint64_t end_offset;
	uint64_t offsets_in_strip[3];
	uint64_t start_bdev_idx;
	uint64_t start_bdev_offset;
	uint64_t start_bdev_idxs[3];
	int i, j, l;

	/* 3 different situations of offset in strip */
	offsets_in_strip[0] = 0;
	offsets_in_strip[1] = g_strip_size >> 1;
	offsets_in_strip[2] = g_strip_size - 1;

	/* 3 different situations of start_bdev_idx */
	start_bdev_idxs[0] = 0;
	start_bdev_idxs[1] = g_max_base_drives >> 1;
	start_bdev_idxs[2] = g_max_base_drives - 1;

	/* consider different offset in strip */
	for (i = 0; i < 3; i++) {
		start_offset = offsets_in_strip[i];
		for (j = 0; j < 3; j++) {
			end_offset = offsets_in_strip[j];
			if (n_strips == 1 && start_offset > end_offset) {
				continue;
			}

			/* consider at which base bdev the LBA starts */
			for (l = 0; l < 3; l++) {
				start_bdev_idx = start_bdev_idxs[l];
				start_bdev_offset = start_bdev_idx * g_strip_size;
				lba = g_lba_offset + start_bdev_offset + start_offset;
				nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;

				g_io_ranges[g_io_range_idx].lba = lba;
				g_io_ranges[g_io_range_idx].nblocks = nblocks;

				SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
				g_io_range_idx++;
			}
		}
	}
}

static void
raid_bdev_io_generate(void)
{
	uint64_t n_strips;
	uint64_t n_strips_span = g_max_base_drives;
	uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1,
				      g_max_base_drives * 2, g_max_base_drives * 3,
				      g_max_base_drives * 4
				     };
	uint32_t i;

	g_io_range_idx = 0;

	/* Consider strip counts from 1 up to the number spanning all base bdevs,
	 * and then several multiples of that span.
	 */
	for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
		raid_bdev_io_generate_by_strips(n_strips);
	}

	for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
		n_strips = n_strips_times[i];
		raid_bdev_io_generate_by_strips(n_strips);
	}
}

static void
test_unmap_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;
	uint32_t count;
	uint64_t io_len;
	uint64_t lba;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true);

	raid_bdev_io_generate();
	for (count = 0; count < g_io_range_idx; count++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = g_io_ranges[count].nblocks;
		lba = g_io_ranges[count].lba;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
		memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
					  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

/* Test IO failures */
static void
test_io_failure(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;
	uint32_t count;
	uint64_t io_len;
	uint64_t lba;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, req.name) == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	lba = 0;
	for (count = 0; count < 1; count++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << count;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  INVALID_IO_SUBMIT);
		bdev_io_cleanup(bdev_io);
	}

	lba = 0;
	g_child_io_status_flag = false;
	for (count = 0; count < 1; count++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << count;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

/* Test reset IO */
static void
test_reset_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	g_bdev_io_submit_status = 0;
	g_child_io_status_flag = true;

	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
	memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
	g_io_output_index = 0;
	raid_bdev_submit_request(ch, bdev_io);
1764 	raid_bdev_submit_request(ch, bdev_io);
1765 	verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1766 			true);
1767 	bdev_io_cleanup(bdev_io);
1768 
1769 	free_test_req(&req);
1770 	spdk_put_io_channel(ch);
1771 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1772 	rpc_bdev_raid_delete(NULL, NULL);
1773 	CU_ASSERT(g_rpc_err == 0);
1774 	verify_raid_bdev_present("raid1", false);
1775 
1776 	raid_bdev_exit();
1777 	base_bdevs_cleanup();
1778 	reset_globals();
1779 }
1780 
1781 /* Create multiple raids, destroy them without issuing IO, and exercise the get_raids RPC */
1782 static void
1783 test_multi_raid_no_io(void)
1784 {
1785 	struct rpc_bdev_raid_create *construct_req;
1786 	struct rpc_bdev_raid_delete destroy_req;
1787 	struct rpc_bdev_raid_get_bdevs get_raids_req;
1788 	uint8_t i;
1789 	char name[16];
1790 	uint8_t bbdev_idx = 0;
1791 
1792 	set_globals();
1793 	construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
1794 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
1795 	CU_ASSERT(raid_bdev_init() == 0);
1796 	for (i = 0; i < g_max_raids; i++) {
1797 		snprintf(name, 16, "%s%u", "raid", i);
1798 		verify_raid_bdev_present(name, false);
1799 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
1800 		bbdev_idx += g_max_base_drives;
1801 		rpc_bdev_raid_create(NULL, NULL);
1802 		CU_ASSERT(g_rpc_err == 0);
1803 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
1804 	}
1805 
1806 	create_get_raids_req(&get_raids_req, "all", 0);
1807 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1808 	CU_ASSERT(g_rpc_err == 0);
1809 	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
1810 	for (i = 0; i < g_get_raids_count; i++) {
1811 		free(g_get_raids_output[i]);
1812 	}
1813 
1814 	create_get_raids_req(&get_raids_req, "online", 0);
1815 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1816 	CU_ASSERT(g_rpc_err == 0);
1817 	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
1818 	for (i = 0; i < g_get_raids_count; i++) {
1819 		free(g_get_raids_output[i]);
1820 	}
1821 
1822 	create_get_raids_req(&get_raids_req, "configuring", 0);
1823 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1824 	CU_ASSERT(g_rpc_err == 0);
1825 	CU_ASSERT(g_get_raids_count == 0);
1826 
1827 	create_get_raids_req(&get_raids_req, "offline", 0);
1828 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1829 	CU_ASSERT(g_rpc_err == 0);
1830 	CU_ASSERT(g_get_raids_count == 0);
1831 
1832 	create_get_raids_req(&get_raids_req, "invalid_category", 0);
1833 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1834 	CU_ASSERT(g_rpc_err == 1);
1835 	CU_ASSERT(g_get_raids_count == 0);
1836 
1837 	create_get_raids_req(&get_raids_req, "all", 1);
1838 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1839 	CU_ASSERT(g_rpc_err == 1);
1840 	free(get_raids_req.category);
1841 	CU_ASSERT(g_get_raids_count == 0);
1842 
1843 	create_get_raids_req(&get_raids_req, "all", 0);
1844 	rpc_bdev_raid_get_bdevs(NULL, NULL);
1845 	CU_ASSERT(g_rpc_err == 0);
1846 	CU_ASSERT(g_get_raids_count == g_max_raids);
1847 	for (i = 0; i < g_get_raids_count; i++) {
1848 		free(g_get_raids_output[i]);
1849 	}
1850 
1851 	for (i = 0; i < g_max_raids; i++) {
1852 		SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
1853 		snprintf(name, 16, "%s", construct_req[i].name);
1854 		create_raid_bdev_delete_req(&destroy_req, name, 0);
1855 		rpc_bdev_raid_delete(NULL, NULL);
1856 		CU_ASSERT(g_rpc_err == 0);
1857 		verify_raid_bdev_present(name, false);
1858 	}
1859 	raid_bdev_exit();
1860 	for (i = 0; i < g_max_raids; i++) {
1861 		free_test_req(&construct_req[i]);
1862 	}
1863 	free(construct_req);
1864 	base_bdevs_cleanup();
1865 	reset_globals();
1866 }
1867 
1868 /* Create multiple raids and fire IOs on them */
1869 static void
1870 test_multi_raid_with_io(void)
1871 {
1872 	struct rpc_bdev_raid_create *construct_req;
1873 	struct rpc_bdev_raid_delete destroy_req;
1874 	uint8_t i;
1875 	char name[16];
1876 	uint8_t bbdev_idx = 0;
1877 	struct raid_bdev *pbdev;
1878 	struct spdk_io_channel **channels;
1879 	struct spdk_bdev_io *bdev_io;
1880 	uint64_t io_len;
1881 	uint64_t lba = 0;
1882 	int16_t iotype;
1883 
1884 	set_globals();
1885 	construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create));
1886 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
1887 	CU_ASSERT(raid_bdev_init() == 0);
1888 	channels = calloc(g_max_raids, sizeof(*channels));
1889 	SPDK_CU_ASSERT_FATAL(channels != NULL);
1890 
1891 	for (i = 0; i < g_max_raids; i++) {
1892 		snprintf(name, 16, "%s%u", "raid", i);
1893 		verify_raid_bdev_present(name, false);
1894 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
1895 		bbdev_idx += g_max_base_drives;
1896 		rpc_bdev_raid_create(NULL, NULL);
1897 		CU_ASSERT(g_rpc_err == 0);
1898 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
1899 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1900 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
1901 				break;
1902 			}
1903 		}
1904 		CU_ASSERT(pbdev != NULL);
1905 
1906 		channels[i] = spdk_get_io_channel(pbdev);
1907 		SPDK_CU_ASSERT_FATAL(channels[i] != NULL);
1908 	}
1909 
1910 	/* This will perform a read on the first raid and a write on the rest. It can be
1911 	 * expanded in the future to perform r/w on each raid device in the event that
1912 	 * multiple raid levels are supported.
1913 	 */
1914 	for (i = 0; i < g_max_raids; i++) {
1915 		struct spdk_io_channel *ch = channels[i];
1916 		struct raid_bdev_io_channel *ch_ctx = spdk_io_channel_get_ctx(ch);
1917 
1918 		SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1919 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1920 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1921 		io_len = g_strip_size;
1922 		iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
1923 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1924 		g_io_output_index = 0;
1925 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1926 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
1927 				break;
1928 			}
1929 		}
1930 		CU_ASSERT(pbdev != NULL);
1931 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, iotype);
1932 		raid_bdev_submit_request(ch, bdev_io);
1933 		verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
1934 			  g_child_io_status_flag);
1935 		bdev_io_cleanup(bdev_io);
1936 	}
1937 
1938 	for (i = 0; i < g_max_raids; i++) {
1939 		spdk_put_io_channel(channels[i]);
1940 		snprintf(name, 16, "%s", construct_req[i].name);
1941 		create_raid_bdev_delete_req(&destroy_req, name, 0);
1942 		rpc_bdev_raid_delete(NULL, NULL);
1943 		CU_ASSERT(g_rpc_err == 0);
1944 		verify_raid_bdev_present(name, false);
1945 	}
1946 	raid_bdev_exit();
1947 	for (i = 0; i < g_max_raids; i++) {
1948 		free_test_req(&construct_req[i]);
1949 	}
1950 	free(construct_req);
1951 	free(channels);
1952 	base_bdevs_cleanup();
1953 	reset_globals();
1954 }
1955 
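/* Verify which IO types the raid bdev reports as supported */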
1956 static void
1957 test_io_type_supported(void)
1958 {
1959 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
1960 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
1961 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
1962 }
1963 
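/* Verify that dumping raid bdev info to JSON succeeds for an online raid */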
1964 static void
1965 test_raid_json_dump_info(void)
1966 {
1967 	struct rpc_bdev_raid_create req;
1968 	struct rpc_bdev_raid_delete destroy_req;
1969 	struct raid_bdev *pbdev;
1970 
1971 	set_globals();
1972 	CU_ASSERT(raid_bdev_init() == 0);
1973 
1974 	verify_raid_bdev_present("raid1", false);
1975 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1976 	rpc_bdev_raid_create(NULL, NULL);
1977 	CU_ASSERT(g_rpc_err == 0);
1978 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1979 
1980 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1981 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1982 			break;
1983 		}
1984 	}
1985 	CU_ASSERT(pbdev != NULL);
1986 
1987 	CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
1988 
1989 	free_test_req(&req);
1990 
1991 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1992 	rpc_bdev_raid_delete(NULL, NULL);
1993 	CU_ASSERT(g_rpc_err == 0);
1994 	verify_raid_bdev_present("raid1", false);
1995 
1996 	raid_bdev_exit();
1997 	base_bdevs_cleanup();
1998 	reset_globals();
1999 }
2000 
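/* The bdev layer sizes the per-IO driver context from raid_bdev_get_ctx_size() */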
2001 static void
2002 test_context_size(void)
2003 {
2004 	CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
2005 }
2006 
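/* Exercise raid level string conversions in both directions, including invalid input */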
2007 static void
2008 test_raid_level_conversions(void)
2009 {
2010 	const char *raid_str;
2011 
2012 	CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL);
2013 	CU_ASSERT(raid_bdev_str_to_level("0") == RAID0);
2014 	CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0);
2015 	CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0);
2016 
2017 	raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
2018 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
2019 	raid_str = raid_bdev_level_to_str(1234);
2020 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
2021 	raid_str = raid_bdev_level_to_str(RAID0);
2022 	CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
2023 }
2024 
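/* Create a raid bdev with superblock enabled and verify it comes online */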
2025 static void
2026 test_create_raid_superblock(void)
2027 {
2028 	struct rpc_bdev_raid_create req;
2029 	struct rpc_bdev_raid_delete delete_req;
2030 
2031 	set_globals();
2032 	CU_ASSERT(raid_bdev_init() == 0);
2033 
2034 	verify_raid_bdev_present("raid1", false);
2035 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true);
2036 	rpc_bdev_raid_create(NULL, NULL);
2037 	CU_ASSERT(g_rpc_err == 0);
2038 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2039 	free_test_req(&req);
2040 
2041 	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
2042 	rpc_bdev_raid_delete(NULL, NULL);
2043 	CU_ASSERT(g_rpc_err == 0);
2044 	raid_bdev_exit();
2045 	base_bdevs_cleanup();
2046 	reset_globals();
2047 }
2048 
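/* Process stubs for test_raid_process: submit_process_request() accounts the
 * blocks handed to the module and completes each request via a thread message.
 */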
2049 static void
2050 complete_process_request(void *ctx)
2051 {
2052 	struct raid_bdev_process_request *process_req = ctx;
2053 
2054 	raid_bdev_process_request_complete(process_req, 0);
2055 }
2056 
2057 static int
2058 submit_process_request(struct raid_bdev_process_request *process_req,
2059 		       struct raid_bdev_io_channel *raid_ch)
2060 {
2061 	struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(raid_ch));
2062 
2063 	*(uint64_t *)raid_bdev->module_private += process_req->num_blocks;
2064 
2065 	spdk_thread_send_msg(spdk_get_thread(), complete_process_request, process_req);
2066 
2067 	return process_req->num_blocks;
2068 }
2069 
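/* Test the raid process framework: start a rebuild of the first base bdev and
 * poll the process thread until every block of the raid bdev has been processed.
 */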
2070 static void
2071 test_raid_process(void)
2072 {
2073 	struct rpc_bdev_raid_create req;
2074 	struct rpc_bdev_raid_delete destroy_req;
2075 	struct raid_bdev *pbdev;
2076 	struct spdk_bdev *base_bdev;
2077 	struct spdk_thread *process_thread;
2078 	uint64_t num_blocks_processed = 0;
2079 
2080 	set_globals();
2081 	CU_ASSERT(raid_bdev_init() == 0);
2082 
2083 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2084 	verify_raid_bdev_present("raid1", false);
2085 	TAILQ_FOREACH(base_bdev, &g_bdev_list, internal.link) {
2086 		base_bdev->blockcnt = 128;
2087 	}
2088 	rpc_bdev_raid_create(NULL, NULL);
2089 	CU_ASSERT(g_rpc_err == 0);
2090 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2091 	free_test_req(&req);
2092 
2093 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2094 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2095 			break;
2096 		}
2097 	}
2098 	CU_ASSERT(pbdev != NULL);
2099 
2100 	pbdev->module->submit_process_request = submit_process_request;
2101 	pbdev->module_private = &num_blocks_processed;
2102 
2103 	CU_ASSERT(raid_bdev_start_rebuild(&pbdev->base_bdev_info[0]) == 0);
2104 	poll_threads();
2105 
2106 	SPDK_CU_ASSERT_FATAL(pbdev->process != NULL);
2107 
2108 	process_thread = spdk_thread_get_by_id(spdk_thread_get_id(spdk_get_thread()) + 1);
2109 
2110 	while (spdk_thread_poll(process_thread, 0, 0) > 0) {
2111 		poll_threads();
2112 	}
2113 
2114 	CU_ASSERT(pbdev->process == NULL);
2115 	CU_ASSERT(num_blocks_processed == pbdev->bdev.blockcnt);
2116 
2117 	poll_threads();
2118 
2119 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2120 	rpc_bdev_raid_delete(NULL, NULL);
2121 	CU_ASSERT(g_rpc_err == 0);
2122 	verify_raid_bdev_present("raid1", false);
2123 
2124 	raid_bdev_exit();
2125 	base_bdevs_cleanup();
2126 	reset_globals();
2127 }
2128 
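/* Test splitting of IOs that cross the offset of an active process: the part at
 * or above the process offset is submitted first, then the part below it, after
 * which the original request is restored and completed.
 */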
2129 static void
2130 test_raid_io_split(void)
2131 {
2132 	struct rpc_bdev_raid_create req;
2133 	struct rpc_bdev_raid_delete destroy_req;
2134 	struct raid_bdev *pbdev;
2135 	struct spdk_io_channel *ch;
2136 	struct raid_bdev_io_channel *raid_ch;
2137 	struct spdk_bdev_io *bdev_io;
2138 	struct raid_bdev_io *raid_io;
2139 	uint64_t split_offset;
2140 	struct iovec iovs_orig[4];
2141 	struct raid_bdev_process process = { };
2142 
2143 	set_globals();
2144 	CU_ASSERT(raid_bdev_init() == 0);
2145 
2146 	verify_raid_bdev_present("raid1", false);
2147 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2148 	rpc_bdev_raid_create(NULL, NULL);
2149 	CU_ASSERT(g_rpc_err == 0);
2150 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2151 
2152 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2153 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2154 			break;
2155 		}
2156 	}
2157 	CU_ASSERT(pbdev != NULL);
2158 	pbdev->bdev.md_len = 8;
2159 
2160 	process.raid_bdev = pbdev;
2161 	process.target = &pbdev->base_bdev_info[0];
2162 	pbdev->process = &process;
2163 	ch = spdk_get_io_channel(pbdev);
2164 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2165 	raid_ch = spdk_io_channel_get_ctx(ch);
2166 	g_bdev_io_defer_completion = true;
2167 
2168 	/* test split of bdev_io with 1 iovec */
2169 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2170 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2171 	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
2172 	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE);
2173 	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
2174 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2175 	bdev_io->u.bdev.md_buf = (void *)0x1000000;
2176 	g_io_output_index = 0;
2177 
2178 	split_offset = 1;
2179 	raid_ch->process.offset = split_offset;
2180 	raid_bdev_submit_request(ch, bdev_io);
2181 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2182 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2183 	CU_ASSERT(raid_io->iovcnt == 1);
2184 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2185 	CU_ASSERT(raid_io->iovs == raid_io->split.iov);
2186 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base + split_offset * g_block_len);
2187 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len - split_offset * g_block_len);
2188 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2189 	complete_deferred_ios();
2190 	CU_ASSERT(raid_io->num_blocks == split_offset);
2191 	CU_ASSERT(raid_io->offset_blocks == 0);
2192 	CU_ASSERT(raid_io->iovcnt == 1);
2193 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
2194 	CU_ASSERT(raid_io->iovs[0].iov_len == split_offset * g_block_len);
2195 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2196 	complete_deferred_ios();
2197 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2198 	CU_ASSERT(raid_io->offset_blocks == 0);
2199 	CU_ASSERT(raid_io->iovcnt == 1);
2200 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
2201 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len);
2202 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2203 
2204 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2205 	CU_ASSERT(g_io_output_index == 2);
2206 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2207 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2208 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2209 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2210 	bdev_io_cleanup(bdev_io);
2211 
2212 	/* test split of bdev_io with 4 iovecs */
2213 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2214 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2215 	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
2216 	_bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE,
2217 			    4, g_strip_size / 4 * g_block_len);
2218 	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
2219 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2220 	bdev_io->u.bdev.md_buf = (void *)0x1000000;
2221 	g_io_output_index = 0;
2222 
2223 	split_offset = 1; /* split inside the first iovec */
2224 	raid_ch->process.offset = split_offset;
2225 	raid_bdev_submit_request(ch, bdev_io);
2226 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2227 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2228 	CU_ASSERT(raid_io->iovcnt == 4);
2229 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[0]);
2230 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[0]);
2231 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base + g_block_len);
2232 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[0].iov_len - g_block_len);
2233 	CU_ASSERT(memcmp(raid_io->iovs + 1, iovs_orig + 1, sizeof(*iovs_orig) * 3) == 0);
2234 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2235 	complete_deferred_ios();
2236 	CU_ASSERT(raid_io->num_blocks == split_offset);
2237 	CU_ASSERT(raid_io->offset_blocks == 0);
2238 	CU_ASSERT(raid_io->iovcnt == 1);
2239 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2240 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base);
2241 	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
2242 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2243 	complete_deferred_ios();
2244 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2245 	CU_ASSERT(raid_io->offset_blocks == 0);
2246 	CU_ASSERT(raid_io->iovcnt == 4);
2247 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2248 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2249 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2250 
2251 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2252 	CU_ASSERT(g_io_output_index == 2);
2253 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2254 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2255 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2256 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2257 
2258 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2259 	g_io_output_index = 0;
2260 
2261 	split_offset = g_strip_size / 2; /* split exactly between second and third iovec */
2262 	raid_ch->process.offset = split_offset;
2263 	raid_bdev_submit_request(ch, bdev_io);
2264 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2265 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2266 	CU_ASSERT(raid_io->iovcnt == 2);
2267 	CU_ASSERT(raid_io->split.iov == NULL);
2268 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
2269 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig + 2, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2270 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2271 	complete_deferred_ios();
2272 	CU_ASSERT(raid_io->num_blocks == split_offset);
2273 	CU_ASSERT(raid_io->offset_blocks == 0);
2274 	CU_ASSERT(raid_io->iovcnt == 2);
2275 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2276 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2277 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2278 	complete_deferred_ios();
2279 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2280 	CU_ASSERT(raid_io->offset_blocks == 0);
2281 	CU_ASSERT(raid_io->iovcnt == 4);
2282 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2283 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2284 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2285 
2286 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2287 	CU_ASSERT(g_io_output_index == 2);
2288 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2289 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2290 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2291 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2292 
2293 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2294 	g_io_output_index = 0;
2295 
2296 	split_offset = g_strip_size / 2 + 1; /* split inside the third iovec */
2297 	raid_ch->process.offset = split_offset;
2298 	raid_bdev_submit_request(ch, bdev_io);
2299 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2300 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2301 	CU_ASSERT(raid_io->iovcnt == 2);
2302 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[2]);
2303 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
2304 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[2].iov_base + g_block_len);
2305 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[2].iov_len - g_block_len);
2306 	CU_ASSERT(raid_io->iovs[1].iov_base == iovs_orig[3].iov_base);
2307 	CU_ASSERT(raid_io->iovs[1].iov_len == iovs_orig[3].iov_len);
2308 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2309 	complete_deferred_ios();
2310 	CU_ASSERT(raid_io->num_blocks == split_offset);
2311 	CU_ASSERT(raid_io->offset_blocks == 0);
2312 	CU_ASSERT(raid_io->iovcnt == 3);
2313 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2314 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 2) == 0);
2315 	CU_ASSERT(raid_io->iovs[2].iov_base == iovs_orig[2].iov_base);
2316 	CU_ASSERT(raid_io->iovs[2].iov_len == g_block_len);
2317 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2318 	complete_deferred_ios();
2319 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2320 	CU_ASSERT(raid_io->offset_blocks == 0);
2321 	CU_ASSERT(raid_io->iovcnt == 4);
2322 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2323 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2324 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2325 
2326 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2327 	CU_ASSERT(g_io_output_index == 2);
2328 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2329 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2330 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2331 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2332 
2333 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2334 	g_io_output_index = 0;
2335 
2336 	split_offset = g_strip_size - 1; /* split inside the last iovec */
2337 	raid_ch->process.offset = split_offset;
2338 	raid_bdev_submit_request(ch, bdev_io);
2339 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2340 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2341 	CU_ASSERT(raid_io->iovcnt == 1);
2342 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[3]);
2343 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[3]);
2344 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[3].iov_base + iovs_orig[3].iov_len - g_block_len);
2345 	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
2346 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2347 	complete_deferred_ios();
2348 	CU_ASSERT(raid_io->num_blocks == split_offset);
2349 	CU_ASSERT(raid_io->offset_blocks == 0);
2350 	CU_ASSERT(raid_io->iovcnt == 4);
2351 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2352 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 3) == 0);
2353 	CU_ASSERT(raid_io->iovs[3].iov_base == iovs_orig[3].iov_base);
2354 	CU_ASSERT(raid_io->iovs[3].iov_len == iovs_orig[3].iov_len - g_block_len);
2355 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2356 	complete_deferred_ios();
2357 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2358 	CU_ASSERT(raid_io->offset_blocks == 0);
2359 	CU_ASSERT(raid_io->iovcnt == 4);
2360 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2361 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2362 	CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2363 
2364 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2365 	CU_ASSERT(g_io_output_index == 2);
2366 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2367 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2368 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2369 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2370 	bdev_io_cleanup(bdev_io);
2371 
2372 	spdk_put_io_channel(ch);
2373 	free_test_req(&req);
2374 
2375 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2376 	rpc_bdev_raid_delete(NULL, NULL);
2377 	CU_ASSERT(g_rpc_err == 0);
2378 	verify_raid_bdev_present("raid1", false);
2379 
2380 	raid_bdev_exit();
2381 	base_bdevs_cleanup();
2382 	reset_globals();
2383 }
2384 
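/* Dummy IO channel create/destroy callbacks for the g_bdev_ch_io_device
 * registered in main().
 */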
2385 static int
2386 test_bdev_ioch_create(void *io_device, void *ctx_buf)
2387 {
2388 	return 0;
2389 }
2390 
2391 static void
2392 test_bdev_ioch_destroy(void *io_device, void *ctx_buf)
2393 {
2394 }
2395 
2396 int
2397 main(int argc, char **argv)
2398 {
2399 	CU_pSuite       suite = NULL;
2400 	unsigned int    num_failures;
2401 
2402 	CU_initialize_registry();
2403 
2404 	suite = CU_add_suite("raid", NULL, NULL);
2405 
2406 	CU_ADD_TEST(suite, test_create_raid);
2407 	CU_ADD_TEST(suite, test_create_raid_superblock);
2408 	CU_ADD_TEST(suite, test_delete_raid);
2409 	CU_ADD_TEST(suite, test_create_raid_invalid_args);
2410 	CU_ADD_TEST(suite, test_delete_raid_invalid_args);
2411 	CU_ADD_TEST(suite, test_io_channel);
2412 	CU_ADD_TEST(suite, test_reset_io);
2413 	CU_ADD_TEST(suite, test_write_io);
2414 	CU_ADD_TEST(suite, test_read_io);
2415 	CU_ADD_TEST(suite, test_unmap_io);
2416 	CU_ADD_TEST(suite, test_io_failure);
2417 	CU_ADD_TEST(suite, test_multi_raid_no_io);
2418 	CU_ADD_TEST(suite, test_multi_raid_with_io);
2419 	CU_ADD_TEST(suite, test_io_type_supported);
2420 	CU_ADD_TEST(suite, test_raid_json_dump_info);
2421 	CU_ADD_TEST(suite, test_context_size);
2422 	CU_ADD_TEST(suite, test_raid_level_conversions);
2423 	CU_ADD_TEST(suite, test_raid_process);
2424 	CU_ADD_TEST(suite, test_raid_io_split);
2425 
2426 	allocate_threads(1);
2427 	set_thread(0);
2428 	spdk_io_device_register(&g_bdev_ch_io_device, test_bdev_ioch_create, test_bdev_ioch_destroy, 0,
2429 				NULL);
2430 
2431 	set_test_opts();
2432 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2433 	CU_cleanup_registry();
2434 
2435 	spdk_io_device_unregister(&g_bdev_ch_io_device, NULL);
2436 	free_threads();
2437 
2438 	return num_failures;
2439 }
2440