/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "bdev/raid/bdev_raid.c"
#include "bdev/raid/bdev_raid_rpc.c"
#include "bdev/raid/raid0.c"
#include "common/lib/test_env.c"

#define MAX_BASE_DRIVES 32
#define MAX_RAIDS 2
#define INVALID_IO_SUBMIT 0xFFFF
#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)
#define MD_SIZE 8

struct spdk_bdev_channel {
	struct spdk_io_channel *channel;
};

struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};

/* Data structure to capture the output of IO for verification */
struct io_output {
	struct spdk_bdev_desc       *desc;
	struct spdk_io_channel      *ch;
	uint64_t                    offset_blocks;
	uint64_t                    num_blocks;
	spdk_bdev_io_completion_cb  cb;
	void                        *cb_arg;
	enum spdk_bdev_io_type      iotype;
	struct iovec                *iovs;
	int                         iovcnt;
	void                        *md_buf;
};

struct raid_io_ranges {
	uint64_t lba;
	uint64_t nblocks;
};

/* Globals */
int g_bdev_io_submit_status;
struct io_output *g_io_output = NULL;
uint32_t g_io_output_index;
uint32_t g_io_comp_status;
bool g_child_io_status_flag;
void *g_rpc_req;
uint32_t g_rpc_req_size;
TAILQ_HEAD(bdev, spdk_bdev);
struct bdev g_bdev_list;
TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
struct waitq g_io_waitq;
uint32_t g_block_len;
uint32_t g_strip_size;
uint32_t g_max_io_size;
uint8_t g_max_base_drives;
uint8_t g_max_raids;
uint8_t g_ignore_io_output;
uint8_t g_rpc_err;
char *g_get_raids_output[MAX_RAIDS];
uint32_t g_get_raids_count;
uint8_t g_json_decode_obj_err;
uint8_t g_json_decode_obj_create;
uint8_t g_config_level_create = 0;
uint8_t g_test_multi_raids;
struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
uint32_t g_io_range_idx;
uint64_t g_lba_offset;
uint64_t g_bdev_ch_io_device;
bool g_bdev_io_defer_completion;
TAILQ_HEAD(, spdk_bdev_io) g_deferred_ios = TAILQ_HEAD_INITIALIZER(g_deferred_ios);
bool g_enable_dif;
struct spdk_thread *g_app_thread;
struct spdk_thread *g_latest_thread;

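/* Stub out the external dependencies that the raid module pulls in but these tests do not exercise. */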
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp),
	    NULL);
DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
		uint32_t state_mask));
DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
					struct spdk_json_write_ctx *w));
DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request,
		bool value));
DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values,
		spdk_json_decode_fn decode_func,
		void *out, size_t max_size, size_t *out_size, size_t stride), 0);
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
		uint64_t val), 0);
DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev,
		struct spdk_memory_domain **domains, int array_size), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev");
DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0);
DEFINE_STUB(spdk_json_write_named_uuid, int, (struct spdk_json_write_ctx *w, const char *name,
		const struct spdk_uuid *val), 0);
DEFINE_STUB_V(raid_bdev_init_superblock, (struct raid_bdev *raid_bdev));
DEFINE_STUB(raid_bdev_alloc_superblock, int, (struct raid_bdev *raid_bdev, uint32_t block_size), 0);
DEFINE_STUB_V(raid_bdev_free_superblock, (struct raid_bdev *raid_bdev));

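/* Minimal bdev accessors whose return values are driven by the test globals and the fake bdev's fields. */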
uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	return g_block_len;
}

typedef enum spdk_dif_type spdk_dif_type_t;

spdk_dif_type_t
spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
{
	if (bdev->md_len != 0) {
		return bdev->dif_type;
	} else {
		return SPDK_DIF_DISABLE;
	}
}

bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

bool
spdk_bdev_is_md_separate(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && !bdev->md_interleave;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

int
raid_bdev_load_base_bdev_superblock(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				    raid_bdev_load_sb_cb cb, void *cb_ctx)
{
	cb(NULL, -EINVAL, cb_ctx);

	return 0;
}

void
raid_bdev_write_superblock(struct raid_bdev *raid_bdev, raid_bdev_write_sb_cb cb, void *cb_ctx)
{
	cb(0, raid_bdev, cb_ctx);
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
{
	return spdk_get_io_channel(&g_bdev_ch_io_device);
}

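/* Set the options shared by all tests; the DIF variant below additionally enables DIF. */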
static int
set_test_opts(void)
{
	g_max_base_drives = MAX_BASE_DRIVES;
	g_max_raids = MAX_RAIDS;
	g_block_len = 4096;
	g_strip_size = 64;
	g_max_io_size = 1024;
	g_enable_dif = false;

	printf("Test Options\n");
	printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
	       "g_max_raids = %u, g_enable_dif = %d\n",
	       g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids,
	       g_enable_dif);

	return 0;
}

static int
set_test_opts_dif(void)
{
	g_max_base_drives = MAX_BASE_DRIVES;
	g_max_raids = MAX_RAIDS;
	g_block_len = 4096;
	g_strip_size = 64;
	g_max_io_size = 1024;
	g_enable_dif = true;

	printf("Test Options\n");
	printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
	       "g_max_raids = %u, g_enable_dif = %d\n",
	       g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids,
	       g_enable_dif);

	return 0;
}

/* Set globals before every test run */
static void
set_globals(void)
{
	uint32_t max_splits;

	g_bdev_io_submit_status = 0;
	if (g_max_io_size < g_strip_size) {
		max_splits = 2;
	} else {
		max_splits = (g_max_io_size / g_strip_size) + 1;
	}
	if (max_splits < g_max_base_drives) {
		max_splits = g_max_base_drives;
	}

	g_io_output = calloc(max_splits, sizeof(struct io_output));
	SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
	g_io_output_index = 0;
	memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
	g_get_raids_count = 0;
	g_io_comp_status = 0;
	g_ignore_io_output = 0;
	g_config_level_create = 0;
	g_rpc_err = 0;
	g_test_multi_raids = 0;
	g_child_io_status_flag = true;
	TAILQ_INIT(&g_bdev_list);
	TAILQ_INIT(&g_io_waitq);
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
	g_json_decode_obj_err = 0;
	g_json_decode_obj_create = 0;
	g_lba_offset = 0;
	g_bdev_io_defer_completion = false;
}

static void
base_bdevs_cleanup(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev *bdev_next;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
			free(bdev->name);
			TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
			free(bdev);
		}
	}
}

static void
check_and_remove_raid_bdev(struct raid_bdev *raid_bdev)
{
	struct raid_base_bdev_info *base_info;

	assert(raid_bdev != NULL);
	assert(raid_bdev->base_bdev_info != NULL);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		if (base_info->desc) {
			raid_bdev_free_base_bdev_resource(base_info);
		}
	}
	assert(raid_bdev->num_base_bdevs_discovered == 0);
	raid_bdev_cleanup_and_free(raid_bdev);
}

/* Reset globals */
static void
reset_globals(void)
{
	if (g_io_output) {
		free(g_io_output);
		g_io_output = NULL;
	}
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
		     uint64_t len)
{
	cb(bdev_io->internal.ch->channel, bdev_io, true);
}

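/*
 * Generate protection information for the given data and metadata buffers.
 * With separate metadata (DIX), the PI is written into md_buf; the read path
 * uses this to produce data that verify_dif() can later check.
 */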
static void
generate_dif(struct iovec *iovs, int iovcnt, void *md_buf,
	     uint64_t offset_blocks, uint32_t num_blocks, struct spdk_bdev *bdev)
{
	struct spdk_dif_ctx dif_ctx;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	spdk_dif_type_t dif_type;
	bool md_interleaved;
	struct iovec md_iov;

	dif_type = spdk_bdev_get_dif_type(bdev);
	md_interleaved = spdk_bdev_is_md_interleaved(bdev);

	if (dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       md_interleaved,
			       spdk_bdev_is_dif_head_of_md(bdev),
			       dif_type,
			       bdev->dif_check_flags,
			       offset_blocks,
			       0xFFFF, 0x123, 0, 0, &dif_opts);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	if (!md_interleaved) {
		md_iov.iov_base = md_buf;
		md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks;

		rc = spdk_dix_generate(iovs, iovcnt, &md_iov, num_blocks, &dif_ctx);
		SPDK_CU_ASSERT_FATAL(rc == 0);
	}
}

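/*
 * Verify the protection information in the given buffers against a DIF
 * context built with the same parameters as in generate_dif(); any guard,
 * app tag or reference tag mismatch fails the test.
 */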
static void
verify_dif(struct iovec *iovs, int iovcnt, void *md_buf,
	   uint64_t offset_blocks, uint32_t num_blocks, struct spdk_bdev *bdev)
{
	struct spdk_dif_ctx dif_ctx;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	struct spdk_dif_error errblk;
	spdk_dif_type_t dif_type;
	bool md_interleaved;
	struct iovec md_iov;

	dif_type = spdk_bdev_get_dif_type(bdev);
	md_interleaved = spdk_bdev_is_md_interleaved(bdev);

	if (dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       md_interleaved,
			       spdk_bdev_is_dif_head_of_md(bdev),
			       dif_type,
			       bdev->dif_check_flags,
			       offset_blocks,
			       0xFFFF, 0x123, 0, 0, &dif_opts);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	if (!md_interleaved) {
		md_iov.iov_base = md_buf;
		md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks;

		rc = spdk_dix_verify(iovs, iovcnt,
				     &md_iov, num_blocks, &dif_ctx, &errblk);
		SPDK_CU_ASSERT_FATAL(rc == 0);
	}
}

/* Store the IO completion status in a global variable so tests can verify it */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);

	if (g_io_comp_status && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		verify_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
			   bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
	}
}

static void
set_io_output(struct io_output *output,
	      struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	      uint64_t offset_blocks, uint64_t num_blocks,
	      spdk_bdev_io_completion_cb cb, void *cb_arg,
	      enum spdk_bdev_io_type iotype, struct iovec *iovs,
	      int iovcnt, void *md)
{
	output->desc = desc;
	output->ch = ch;
	output->offset_blocks = offset_blocks;
	output->num_blocks = num_blocks;
	output->cb = cb;
	output->cb_arg = cb_arg;
	output->iotype = iotype;
	output->iovs = iovs;
	output->iovcnt = iovcnt;
	output->md_buf = md;
}

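/*
 * Complete a child IO immediately, or queue it on g_deferred_ios when
 * deferred completion is enabled; complete_deferred_ios() drains that queue.
 */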
static void
child_io_complete(struct spdk_bdev_io *child_io, spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_bdev_io_defer_completion) {
		child_io->internal.cb = cb;
		child_io->internal.caller_ctx = cb_arg;
		TAILQ_INSERT_TAIL(&g_deferred_ios, child_io, internal.link);
	} else {
		cb(child_io, g_child_io_status_flag, cb_arg);
	}
}

static void
complete_deferred_ios(void)
{
	struct spdk_bdev_io *child_io, *tmp;

	TAILQ_FOREACH_SAFE(child_io, &g_deferred_ios, internal.link, tmp) {
		TAILQ_REMOVE(&g_deferred_ios, child_io, internal.link);
		child_io->internal.cb(child_io, g_child_io_status_flag, child_io->internal.caller_ctx);
	}
}

/* Cache the split write IOs so tests can verify how the parent IO was split */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_ext(desc, ch, iov, iovcnt, offset_blocks,
					   num_blocks, cb, cb_arg, NULL);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt,
			    uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;
	void *md = opts ? opts->metadata : NULL;	/* opts may be NULL via the non-ext wrapper */

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_max_io_size < g_strip_size) {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
	} else {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
	}
	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_WRITE, iov, iovcnt, md);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev_ext_io_opts opts = {
		.metadata = md
	};

	return spdk_bdev_writev_blocks_ext(desc, ch, iov, iovcnt, offset_blocks,
					   num_blocks, cb, cb_arg, &opts);
}

int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET,
			      NULL, 0, NULL);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_UNMAP, NULL, 0, NULL);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

void
spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
{
	CU_ASSERT(bdeverrno == 0);
	SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL);
	bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
	return 0;
}

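/* Drive the app thread until it has no more work, e.g. to let unregister callbacks run. */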
static void
poll_app_thread(void)
{
	while (spdk_thread_poll(g_app_thread, 0, 0) > 0) {
	}
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int ret;

	SPDK_CU_ASSERT_FATAL(spdk_bdev_get_by_name(bdev->name) == bdev);
	TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);

	bdev->internal.unregister_cb = cb_fn;
	bdev->internal.unregister_ctx = cb_arg;

	ret = bdev->fn_table->destruct(bdev->ctxt);
	CU_ASSERT(ret == 1);

	poll_app_thread();
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
		   void *event_ctx, struct spdk_bdev_desc **_desc)
{
	struct spdk_bdev *bdev;

	bdev = spdk_bdev_get_by_name(bdev_name);
	if (bdev == NULL) {
		return -ENODEV;
	}

	*_desc = (void *)bdev;
	return 0;
}

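/* Descriptors are modeled simply as the bdev pointer itself in these tests. */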
struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (void *)desc;
}

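/* JSON write mocks that check the RPC output fields against the original request. */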
int
spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "strip_size_kb") == 0) {
			CU_ASSERT(req->strip_size_kb == val);
		} else if (strcmp(name, "blocklen_shift") == 0) {
			CU_ASSERT(spdk_u32log2(g_block_len) == val);
		} else if (strcmp(name, "num_base_bdevs") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		} else if (strcmp(name, "state") == 0) {
			CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
		} else if (strcmp(name, "destruct_called") == 0) {
			CU_ASSERT(val == 0);
		} else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		}
	}
	return 0;
}

int
spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
{
	if (g_test_multi_raids) {
		if (strcmp(name, "name") == 0) {
			g_get_raids_output[g_get_raids_count] = strdup(val);
			SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
			g_get_raids_count++;
		}
	} else {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "raid_level") == 0) {
			CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0);
		}
	}
	return 0;
}

int
spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;
		if (strcmp(name, "superblock") == 0) {
			CU_ASSERT(val == req->superblock_enabled);
		}
	}
	return 0;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io) {
		free(bdev_io);
	}
}

/* Cache the split read IOs so tests can verify how the parent IO was split */
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_ext(desc, ch, iov, iovcnt, offset_blocks,
					  num_blocks, cb, cb_arg, NULL);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt,
			   uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;
	void *md = opts ? opts->metadata : NULL;	/* opts may be NULL via the non-ext wrapper */

	if (g_ignore_io_output) {
		return 0;
	}

	SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_READ, iov, iovcnt, md);
		generate_dif(iov, iovcnt, md, offset_blocks, num_blocks,
			     spdk_bdev_desc_get_bdev(desc));
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev_ext_io_opts opts = {
		.metadata = md
	};

	return spdk_bdev_readv_blocks_ext(desc, ch, iov, iovcnt, offset_blocks,
					  num_blocks, cb, cb_arg, &opts);
}

void
spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
{
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module != NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
	bdev->internal.claim.v1.module = NULL;
}

int
spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_bdev_module *module)
{
	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
		CU_ASSERT(bdev->internal.claim.v1.module != NULL);
		return -1;
	}
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
	bdev->internal.claim.v1.module = module;
	return 0;
}

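/* Decode mock: copy the prepared g_rpc_req into the RPC handler's output structure. */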
int
spdk_json_decode_object(const struct spdk_json_val *values,
			const struct spdk_json_object_decoder *decoders, size_t num_decoders,
			void *out)
{
	struct rpc_bdev_raid_create *req, *_out;
	size_t i;

	if (g_json_decode_obj_err) {
		return -1;
	} else if (g_json_decode_obj_create) {
		req = g_rpc_req;
		_out = out;

		_out->name = strdup(req->name);
		SPDK_CU_ASSERT_FATAL(_out->name != NULL);
		_out->strip_size_kb = req->strip_size_kb;
		_out->level = req->level;
		_out->superblock_enabled = req->superblock_enabled;
		_out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
		for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
			_out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
			SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]);
		}
	} else {
		memcpy(out, g_rpc_req, g_rpc_req_size);
	}

	return 0;
}

struct spdk_json_write_ctx *
spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
{
	return (void *)1;
}

void
spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
				 int error_code, const char *msg)
{
	g_rpc_err = 1;
}

void
spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
				     int error_code, const char *fmt, ...)
{
	g_rpc_err = 1;
}

struct spdk_bdev *
spdk_bdev_get_by_name(const char *bdev_name)
{
	struct spdk_bdev *bdev;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
			if (strcmp(bdev_name, bdev->name) == 0) {
				return bdev;
			}
		}
	}

	return NULL;
}

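/* Quiesce mocks complete immediately by invoking the callback inline. */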
int
spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		    spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			uint64_t offset, uint64_t length,
			spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			  uint64_t offset, uint64_t length,
			  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

static void
bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->u.bdev.iovs) {
		int i;

		for (i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
			free(bdev_io->u.bdev.iovs[i].iov_base);
		}
		free(bdev_io->u.bdev.iovs);
	}

	free(bdev_io->u.bdev.md_buf);
	free(bdev_io);
}

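/*
 * Build a bdev_io with the requested iovec layout; for DIF configurations
 * with separate metadata, a metadata buffer is allocated as well.
 */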
static void
_bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch,
		    struct spdk_bdev *bdev, uint64_t lba, uint64_t blocks, int16_t iotype,
		    int iovcnt, size_t iov_len)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
	int i;

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;
	bdev_io->internal.ch = channel;
	bdev_io->u.bdev.iovcnt = iovcnt;

	if (iovcnt == 0) {
		bdev_io->u.bdev.iovs = NULL;
		bdev_io->u.bdev.md_buf = NULL;
		return;
	}

	SPDK_CU_ASSERT_FATAL(iov_len * iovcnt == blocks * g_block_len);

	bdev_io->u.bdev.iovs = calloc(iovcnt, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);

	for (i = 0; i < iovcnt; i++) {
		struct iovec *iov = &bdev_io->u.bdev.iovs[i];

		iov->iov_base = calloc(1, iov_len);
		SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL);
		iov->iov_len = iov_len;
	}

	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE && !spdk_bdev_is_md_interleaved(bdev)) {
		bdev_io->u.bdev.md_buf = calloc(1, blocks * spdk_bdev_get_md_size(bdev));
	}
}

static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	int iovcnt;
	size_t iov_len;

	/* bdev_io->type is not set yet at this point, so check the requested iotype */
	if (iotype == SPDK_BDEV_IO_TYPE_UNMAP || iotype == SPDK_BDEV_IO_TYPE_FLUSH) {
		iovcnt = 0;
		iov_len = 0;
	} else {
		iovcnt = 1;
		iov_len = blocks * g_block_len;
	}

	_bdev_io_initialize(bdev_io, ch, bdev, lba, blocks, iotype, iovcnt, iov_len);
}

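/* Verify that a reset was fanned out to every base bdev channel. */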
static void
verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
		struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint8_t index = 0;
	struct io_output *output;

	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT);
	SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);

	CU_ASSERT(g_io_output_index == num_base_drives);
	for (index = 0; index < g_io_output_index; index++) {
		output = &g_io_output[index];
		CU_ASSERT(ch_ctx->base_channel[index] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

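/*
 * Verify RAID0 striping of a read/write IO. Strips map round-robin across the
 * base drives: strip n lands on drive n % num_base_drives at physical strip
 * n / num_base_drives (e.g. with 4 drives, strip 5 maps to drive 1, strip 1).
 */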
static void
verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
	  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint32_t splits_reqd = (end_strip - start_strip + 1);
	uint32_t strip;
	uint64_t pd_strip;
	uint8_t pd_idx;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint32_t index = 0;
	struct io_output *output;

	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);

	CU_ASSERT(splits_reqd == g_io_output_index);
	for (strip = start_strip; strip <= end_strip; strip++, index++) {
		pd_strip = strip / num_base_drives;
		pd_idx = strip % num_base_drives;
		if (strip == start_strip) {
			offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
			pd_lba = (pd_strip << strip_shift) + offset_in_strip;
			if (strip == end_strip) {
				pd_blocks = bdev_io->u.bdev.num_blocks;
			} else {
				pd_blocks = g_strip_size - offset_in_strip;
			}
		} else if (strip == end_strip) {
			pd_lba = pd_strip << strip_shift;
			pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
				     (g_strip_size - 1)) + 1;
		} else {
			pd_lba = pd_strip << raid_bdev->strip_size_shift;
			pd_blocks = raid_bdev->strip_size;
		}
		output = &g_io_output[index];
		CU_ASSERT(pd_lba == output->offset_blocks);
		CU_ASSERT(pd_blocks == output->num_blocks);
		CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
		if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
			verify_dif(output->iovs, output->iovcnt, output->md_buf,
				   output->offset_blocks, output->num_blocks,
				   spdk_bdev_desc_get_bdev(raid_bdev->base_bdev_info[pd_idx].desc));
		}
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

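/*
 * Verify IOs that carry no data payload (unmap/flush): one child IO per
 * involved disk, checked by offset and block count only.
 */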
static void
verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
			  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev,
			  uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size;
	uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) %
				       g_strip_size;
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint8_t n_disks_involved;
	uint64_t start_strip_disk_idx;
	uint64_t end_strip_disk_idx;
	uint64_t nblocks_in_start_disk;
	uint64_t offset_in_start_disk;
	uint8_t disk_idx;
	uint64_t base_io_idx;
	uint64_t sum_nblocks = 0;
	struct io_output *output;

	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE);

	n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
	CU_ASSERT(n_disks_involved == g_io_output_index);

	start_strip_disk_idx = start_strip % num_base_drives;
	end_strip_disk_idx = end_strip % num_base_drives;

	offset_in_start_disk = g_io_output[0].offset_blocks;
	nblocks_in_start_disk = g_io_output[0].num_blocks;

	for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
	     base_io_idx++, disk_idx++) {
		uint64_t start_offset_in_disk;
		uint64_t end_offset_in_disk;

		output = &g_io_output[base_io_idx];

		/* round disk_idx */
		if (disk_idx >= num_base_drives) {
			disk_idx %= num_base_drives;
		}

		/* start_offset_in_disk strip-alignment check:
		 * the first base IO has the same start_offset_in_strip as the
		 * whole raid IO; every other base IO must start strip-aligned
		 * (start_offset_in_strip == 0).
		 */
		start_offset_in_disk = output->offset_blocks;
		if (base_io_idx == 0) {
			CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
		} else {
			CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
		}

		/* end_offset_in_disk strip-alignment check:
		 * the base IO on the disk holding end_strip has the same
		 * end_offset_in_strip as the whole raid IO; every other base IO
		 * must end at the last block of a strip.
		 */
		end_offset_in_disk = output->offset_blocks + output->num_blocks - 1;
		if (disk_idx == end_strip_disk_idx) {
			CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
		} else {
			CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
		}

		/* start_offset_in_disk compared with the start disk's offset:
		 * 1. For disk_idx greater than start_strip_disk_idx, start_offset_in_disk
		 *    must not be larger than offset_in_start_disk, and the gap must be
		 *    less than the strip size.
		 * 2. For disk_idx less than start_strip_disk_idx, start_offset_in_disk
		 *    must be larger than offset_in_start_disk, and the gap must not
		 *    exceed the strip size.
		 */
		if (disk_idx > start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
			CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
		} else if (disk_idx < start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
			CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size);
		}

		/* num_blocks compared with the start disk's:
		 * the difference between them must be within one strip size.
		 */
		if (output->num_blocks <= nblocks_in_start_disk) {
			CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size);
		} else {
			CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size);
		}

		sum_nblocks += output->num_blocks;

		CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}

	/* The per-disk block counts must sum to the raid bdev_io's num_blocks */
	CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks);

	CU_ASSERT(g_io_comp_status == io_status);
}

static void
verify_raid_bdev_present(const char *name, bool presence)
{
	struct raid_bdev *pbdev;
	bool   pbdev_found;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, name) == 0) {
			pbdev_found = true;
			break;
		}
	}
	if (presence == true) {
		CU_ASSERT(pbdev_found == true);
	} else {
		CU_ASSERT(pbdev_found == false);
	}
}

static void
verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state)
{
	struct raid_bdev *pbdev;
	struct raid_base_bdev_info *base_info;
	struct spdk_bdev *bdev = NULL;
	bool   pbdev_found;
	uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, r->name) == 0) {
			pbdev_found = true;
			if (presence == false) {
				break;
			}
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len));
			CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) /
					g_block_len)));
			CU_ASSERT((uint32_t)pbdev->state == raid_state);
			CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->level == r->level);
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) {
				CU_ASSERT(base_info->desc != NULL);
				bdev = spdk_bdev_desc_get_bdev(base_info->desc);
				CU_ASSERT(bdev != NULL);
				CU_ASSERT(base_info->remove_scheduled == false);
				CU_ASSERT((pbdev->superblock_enabled && base_info->data_offset != 0) ||
					  (!pbdev->superblock_enabled && base_info->data_offset == 0));
				CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt);

				if (bdev && base_info->data_size < min_blockcnt) {
					min_blockcnt = base_info->data_size;
				}
			}
			CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) *
				    (r->strip_size_kb * 1024 / g_block_len)) *
				   r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
			CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0);
			CU_ASSERT(pbdev->bdev.write_cache == 0);
			CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
			if (pbdev->num_base_bdevs > 1) {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
			} else {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
			}
			CU_ASSERT(pbdev->bdev.ctxt == pbdev);
			CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
			CU_ASSERT(pbdev->bdev.module == &g_raid_if);
			break;
		}
	}
	if (presence == true) {
		CU_ASSERT(pbdev_found == true);
	} else {
		CU_ASSERT(pbdev_found == false);
	}
}

static void
verify_get_raids(struct rpc_bdev_raid_create *construct_req,
		 uint8_t g_max_raids,
		 char **g_get_raids_output, uint32_t g_get_raids_count)
{
	uint8_t i, j;
	bool found;

	CU_ASSERT(g_max_raids == g_get_raids_count);
	if (g_max_raids == g_get_raids_count) {
		for (i = 0; i < g_max_raids; i++) {
			found = false;
			for (j = 0; j < g_max_raids; j++) {
				if (construct_req[i].name &&
				    strcmp(construct_req[i].name, g_get_raids_output[j]) == 0) {
					found = true;
					break;
				}
			}
			CU_ASSERT(found == true);
		}
	}
}

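/* Create and register g_max_base_drives fake base bdevs named "Nvme<idx>n1". */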
static void
create_base_bdevs(uint32_t bbdev_start_idx)
{
	uint8_t i;
	struct spdk_bdev *base_bdev;
	char name[16];

	for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
		base_bdev = calloc(1, sizeof(struct spdk_bdev));
		SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
		base_bdev->name = strdup(name);
		spdk_uuid_generate(&base_bdev->uuid);
		SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
		base_bdev->blocklen = g_block_len;
		base_bdev->blockcnt = BLOCK_CNT;
		if (g_enable_dif) {
			base_bdev->md_interleave = false;
			base_bdev->md_len = MD_SIZE;
			base_bdev->dif_check_flags =
				SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK |
				SPDK_DIF_FLAGS_APPTAG_CHECK;
			base_bdev->dif_type = SPDK_DIF_TYPE1;
		}
		TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
	}
}

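/*
 * Fill an RPC create request for a RAID0 bdev over g_max_base_drives base
 * bdevs, optionally creating the backing base bdevs as well.
 */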
static void
create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name,
		uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled)
{
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = bbdev_start_idx;

	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);
	r->strip_size_kb = (g_strip_size * g_block_len) / 1024;
	r->level = RAID0;
	r->superblock_enabled = superblock_enabled;
	r->base_bdevs.num_base_bdevs = g_max_base_drives;
	for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
		r->base_bdevs.base_bdevs[i] = strdup(name);
		SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
	}
	if (create_base_bdev == true) {
		create_base_bdevs(bbdev_start_idx);
	}
	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
}

static void
create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name,
			    uint8_t bbdev_start_idx, bool create_base_bdev,
			    uint8_t json_decode_obj_err, bool superblock_enabled)
{
	create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock_enabled);

	g_rpc_err = 0;
	g_json_decode_obj_create = 1;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 0;
}

static void
free_test_req(struct rpc_bdev_raid_create *r)
{
	uint8_t i;

	free(r->name);
	for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
		free(r->base_bdevs.base_bdevs[i]);
	}
}

static void
create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name,
			    uint8_t json_decode_obj_err)
{
	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 0;
}

static void
create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category,
		     uint8_t json_decode_obj_err)
{
	r->category = strdup(category);
	SPDK_CU_ASSERT_FATAL(r->category != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 1;
	g_get_raids_count = 0;
}

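/* Create a RAID0 bdev through the RPC path, verify it, then delete it. */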
static void
test_create_raid(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_create_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *raid_bdev;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	req.level = INVALID_RAID_LEVEL;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	req.strip_size_kb = 1231;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	raid_bdev = raid_bdev_find_by_name("raid2");
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	check_and_remove_raid_bdev(raid_bdev);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete destroy_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 1);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(destroy_req.name);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_io_channel(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	free_test_req(&req);

	spdk_put_io_channel(ch);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_write_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	uint8_t i;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba = 0;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* test 2 IO sizes based on global strip size set earlier */
	for (i = 0; i < 2; i++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << i;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
			     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

1658 static void
1659 test_read_io(void)
1660 {
1661 	struct rpc_bdev_raid_create req;
1662 	struct rpc_bdev_raid_delete destroy_req;
1663 	struct raid_bdev *pbdev;
1664 	struct spdk_io_channel *ch;
1665 	struct raid_bdev_io_channel *ch_ctx;
1666 	uint8_t i;
1667 	struct spdk_bdev_io *bdev_io;
1668 	uint64_t io_len;
1669 	uint64_t lba;
1670 
1671 	set_globals();
1672 	CU_ASSERT(raid_bdev_init() == 0);
1673 
1674 	verify_raid_bdev_present("raid1", false);
1675 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1676 	rpc_bdev_raid_create(NULL, NULL);
1677 	CU_ASSERT(g_rpc_err == 0);
1678 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1679 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1680 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1681 			break;
1682 		}
1683 	}
1684 	CU_ASSERT(pbdev != NULL);
1685 
1686 	ch = spdk_get_io_channel(pbdev);
1687 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1688 
1689 	ch_ctx = spdk_io_channel_get_ctx(ch);
1690 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1691 
1692 	/* Test two IO sizes based on the strip size set earlier: half a strip, then a full strip */
1693 	lba = 0;
1694 	for (i = 0; i < 2; i++) {
1695 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1696 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1697 		io_len = (g_strip_size / 2) << i;
1698 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
1699 		lba += g_strip_size;
1700 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1701 		g_io_output_index = 0;
1702 		raid_bdev_submit_request(ch, bdev_io);
1703 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1704 			  g_child_io_status_flag);
1705 		bdev_io_cleanup(bdev_io);
1706 	}
1707 
1708 	free_test_req(&req);
1709 	spdk_put_io_channel(ch);
1710 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1711 	rpc_bdev_raid_delete(NULL, NULL);
1712 	CU_ASSERT(g_rpc_err == 0);
1713 	verify_raid_bdev_present("raid1", false);
1714 
1715 	raid_bdev_exit();
1716 	base_bdevs_cleanup();
1717 	reset_globals();
1718 }
1719 
1720 static void
1721 raid_bdev_io_generate_by_strips(uint64_t n_strips)
1722 {
1723 	uint64_t lba;
1724 	uint64_t nblocks;
1725 	uint64_t start_offset;
1726 	uint64_t end_offset;
1727 	uint64_t offsets_in_strip[3];
1728 	uint64_t start_bdev_idx;
1729 	uint64_t start_bdev_offset;
1730 	uint64_t start_bdev_idxs[3];
1731 	int i, j, l;
1732 
1733 	/* Three offset positions within a strip: first block, middle, last block */
1734 	offsets_in_strip[0] = 0;
1735 	offsets_in_strip[1] = g_strip_size >> 1;
1736 	offsets_in_strip[2] = g_strip_size - 1;
1737 
1738 	/* Three choices of starting base bdev index: first, middle, last */
1739 	start_bdev_idxs[0] = 0;
1740 	start_bdev_idxs[1] = g_max_base_drives >> 1;
1741 	start_bdev_idxs[2] = g_max_base_drives - 1;
1742 
1743 	/* Iterate over all combinations of start and end offsets within a strip */
1744 	for (i = 0; i < 3; i++) {
1745 		start_offset = offsets_in_strip[i];
1746 		for (j = 0; j < 3; j++) {
1747 			end_offset = offsets_in_strip[j];
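			/* A single strip cannot end before it starts. */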
1748 			if (n_strips == 1 && start_offset > end_offset) {
1749 				continue;
1750 			}
1751 
1752 			/* Consider which base bdev the starting LBA lands on. */
1753 			for (l = 0; l < 3; l++) {
1754 				start_bdev_idx = start_bdev_idxs[l];
1755 				start_bdev_offset = start_bdev_idx * g_strip_size;
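				/* Illustrative example (values assumed): with a strip size of 64,
				 * n_strips = 2, start_offset = 32 and end_offset = 31, the range
				 * below spans (2 - 1) * 64 + 31 - 32 + 1 = 64 blocks.
				 */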
1756 				lba = g_lba_offset + start_bdev_offset + start_offset;
1757 				nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;
1758 
1759 				SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
1760 
1761 				g_io_ranges[g_io_range_idx].lba = lba;
1762 				g_io_ranges[g_io_range_idx].nblocks = nblocks;
1763 				g_io_range_idx++;
1764 			}
1765 		}
1766 	}
1767 }
1768 
1769 static void
1770 raid_bdev_io_generate(void)
1771 {
1772 	uint64_t n_strips;
1773 	uint64_t n_strips_span = g_max_base_drives;
1774 	uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1,
1775 				      g_max_base_drives * 2, g_max_base_drives * 3,
1776 				      g_max_base_drives * 4
1777 				     };
1778 	uint32_t i;
1779 
1780 	g_io_range_idx = 0;
1781 
1782 	/* Consider strip counts from 1 up to one full span of the base bdevs,
1783 	 * and also several multiples of that span.
1784 	 */
1785 	for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
1786 		raid_bdev_io_generate_by_strips(n_strips);
1787 	}
1788 
1789 	for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
1790 		n_strips = n_strips_times[i];
1791 		raid_bdev_io_generate_by_strips(n_strips);
1792 	}
1793 }
1794 
1795 static void
1796 test_unmap_io(void)
1797 {
1798 	struct rpc_bdev_raid_create req;
1799 	struct rpc_bdev_raid_delete destroy_req;
1800 	struct raid_bdev *pbdev;
1801 	struct spdk_io_channel *ch;
1802 	struct raid_bdev_io_channel *ch_ctx;
1803 	struct spdk_bdev_io *bdev_io;
1804 	uint32_t count;
1805 	uint64_t io_len;
1806 	uint64_t lba;
1807 
1808 	set_globals();
1809 	CU_ASSERT(raid_bdev_init() == 0);
1810 
1811 	verify_raid_bdev_present("raid1", false);
1812 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1813 	rpc_bdev_raid_create(NULL, NULL);
1814 	CU_ASSERT(g_rpc_err == 0);
1815 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1816 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1817 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1818 			break;
1819 		}
1820 	}
1821 	SPDK_CU_ASSERT_FATAL(pbdev != NULL);
1822 
1823 	ch = spdk_get_io_channel(pbdev);
1824 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1825 
1826 	ch_ctx = spdk_io_channel_get_ctx(ch);
1827 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1828 
1829 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
1830 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true);
1831 
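	/* Generate a matrix of LBA ranges covering different strip counts,
	 * intra-strip offsets and starting base bdevs, then submit an unmap
	 * for each range.
	 */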
1832 	raid_bdev_io_generate();
1833 	for (count = 0; count < g_io_range_idx; count++) {
1834 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1835 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1836 		io_len = g_io_ranges[count].nblocks;
1837 		lba = g_io_ranges[count].lba;
1838 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
1839 		memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
1840 		g_io_output_index = 0;
1841 		raid_bdev_submit_request(ch, bdev_io);
1842 		verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1843 					  g_child_io_status_flag);
1844 		bdev_io_cleanup(bdev_io);
1845 	}
1846 
1847 	free_test_req(&req);
1848 	spdk_put_io_channel(ch);
1849 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1850 	rpc_bdev_raid_delete(NULL, NULL);
1851 	CU_ASSERT(g_rpc_err == 0);
1852 	verify_raid_bdev_present("raid1", false);
1853 
1854 	raid_bdev_exit();
1855 	base_bdevs_cleanup();
1856 	reset_globals();
1857 }
1858 
1859 /* Test IO failures */
1860 static void
1861 test_io_failure(void)
1862 {
1863 	struct rpc_bdev_raid_create req;
1864 	struct rpc_bdev_raid_delete destroy_req;
1865 	struct raid_bdev *pbdev;
1866 	struct spdk_io_channel *ch;
1867 	struct raid_bdev_io_channel *ch_ctx;
1868 	struct spdk_bdev_io *bdev_io;
1869 	uint32_t count;
1870 	uint64_t io_len;
1871 	uint64_t lba;
1872 
1873 	set_globals();
1874 	CU_ASSERT(raid_bdev_init() == 0);
1875 
1876 	verify_raid_bdev_present("raid1", false);
1877 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1878 	rpc_bdev_raid_create(NULL, NULL);
1879 	CU_ASSERT(g_rpc_err == 0);
1880 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1881 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1882 		if (strcmp(pbdev->bdev.name, req.name) == 0) {
1883 			break;
1884 		}
1885 	}
1886 	SPDK_CU_ASSERT_FATAL(pbdev != NULL);
1887 
1888 	ch = spdk_get_io_channel(pbdev);
1889 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1890 
1891 	ch_ctx = spdk_io_channel_get_ctx(ch);
1892 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1893 
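	/* First submit an IO with an invalid type; submission itself should fail
	 * before any child IO reaches a base bdev.
	 */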
1894 	lba = 0;
1895 	for (count = 0; count < 1; count++) {
1896 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1897 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1898 		io_len = (g_strip_size / 2) << count;
1899 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
1900 		lba += g_strip_size;
1901 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1902 		g_io_output_index = 0;
1903 		raid_bdev_submit_request(ch, bdev_io);
1904 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1905 			  INVALID_IO_SUBMIT);
1906 		bdev_io_cleanup(bdev_io);
1907 	}
1908 
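	/* Then submit a valid write while child IOs are set to fail, and verify
	 * that the failure propagates to the parent IO status.
	 */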
1910 	lba = 0;
1911 	g_child_io_status_flag = false;
1912 	for (count = 0; count < 1; count++) {
1913 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1914 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1915 		io_len = (g_strip_size / 2) << count;
1916 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
1917 		lba += g_strip_size;
1918 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1919 		g_io_output_index = 0;
1920 		generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
1921 			     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
1922 		raid_bdev_submit_request(ch, bdev_io);
1923 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1924 			  g_child_io_status_flag);
1925 		bdev_io_cleanup(bdev_io);
1926 	}
1927 
1928 	free_test_req(&req);
1929 	spdk_put_io_channel(ch);
1930 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1931 	rpc_bdev_raid_delete(NULL, NULL);
1932 	CU_ASSERT(g_rpc_err == 0);
1933 	verify_raid_bdev_present("raid1", false);
1934 
1935 	raid_bdev_exit();
1936 	base_bdevs_cleanup();
1937 	reset_globals();
1938 }
1939 
1940 /* Test reset IO */
1941 static void
1942 test_reset_io(void)
1943 {
1944 	struct rpc_bdev_raid_create req;
1945 	struct rpc_bdev_raid_delete destroy_req;
1946 	struct raid_bdev *pbdev;
1947 	struct spdk_io_channel *ch;
1948 	struct raid_bdev_io_channel *ch_ctx;
1949 	struct spdk_bdev_io *bdev_io;
1950 
1951 	set_globals();
1952 	CU_ASSERT(raid_bdev_init() == 0);
1953 
1954 	verify_raid_bdev_present("raid1", false);
1955 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1956 	rpc_bdev_raid_create(NULL, NULL);
1957 	CU_ASSERT(g_rpc_err == 0);
1958 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1959 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1960 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1961 			break;
1962 		}
1963 	}
1964 	SPDK_CU_ASSERT_FATAL(pbdev != NULL);
1965 
1966 	ch = spdk_get_io_channel(pbdev);
1967 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1968 
1969 	ch_ctx = spdk_io_channel_get_ctx(ch);
1970 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1971 
1972 	g_bdev_io_submit_status = 0;
1973 	g_child_io_status_flag = true;
1974 
1975 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);
1976 
1977 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1978 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1979 	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
1980 	memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
1981 	g_io_output_index = 0;
1982 	raid_bdev_submit_request(ch, bdev_io);
1983 	verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1984 			true);
1985 	bdev_io_cleanup(bdev_io);
1986 
1987 	free_test_req(&req);
1988 	spdk_put_io_channel(ch);
1989 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1990 	rpc_bdev_raid_delete(NULL, NULL);
1991 	CU_ASSERT(g_rpc_err == 0);
1992 	verify_raid_bdev_present("raid1", false);
1993 
1994 	raid_bdev_exit();
1995 	base_bdevs_cleanup();
1996 	reset_globals();
1997 }
1998 
1999 /* Create multiple raids, destroy them without doing IO, and exercise the get_raids RPC */
2000 static void
2001 test_multi_raid_no_io(void)
2002 {
2003 	struct rpc_bdev_raid_create *construct_req;
2004 	struct rpc_bdev_raid_delete destroy_req;
2005 	struct rpc_bdev_raid_get_bdevs get_raids_req;
2006 	uint8_t i;
2007 	char name[16];
2008 	uint8_t bbdev_idx = 0;
2009 
2010 	set_globals();
2011 	construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
2012 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
2013 	CU_ASSERT(raid_bdev_init() == 0);
2014 	for (i = 0; i < g_max_raids; i++) {
2015 		snprintf(name, 16, "%s%u", "raid", i);
2016 		verify_raid_bdev_present(name, false);
2017 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
2018 		bbdev_idx += g_max_base_drives;
2019 		rpc_bdev_raid_create(NULL, NULL);
2020 		CU_ASSERT(g_rpc_err == 0);
2021 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
2022 	}
2023 
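	/* All raids were created online, so the "all" and "online" categories
	 * should each return every raid, while "configuring" and "offline"
	 * should return none.
	 */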
2024 	create_get_raids_req(&get_raids_req, "all", 0);
2025 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2026 	CU_ASSERT(g_rpc_err == 0);
2027 	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
2028 	for (i = 0; i < g_get_raids_count; i++) {
2029 		free(g_get_raids_output[i]);
2030 	}
2031 
2032 	create_get_raids_req(&get_raids_req, "online", 0);
2033 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2034 	CU_ASSERT(g_rpc_err == 0);
2035 	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
2036 	for (i = 0; i < g_get_raids_count; i++) {
2037 		free(g_get_raids_output[i]);
2038 	}
2039 
2040 	create_get_raids_req(&get_raids_req, "configuring", 0);
2041 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2042 	CU_ASSERT(g_rpc_err == 0);
2043 	CU_ASSERT(g_get_raids_count == 0);
2044 
2045 	create_get_raids_req(&get_raids_req, "offline", 0);
2046 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2047 	CU_ASSERT(g_rpc_err == 0);
2048 	CU_ASSERT(g_get_raids_count == 0);
2049 
2050 	create_get_raids_req(&get_raids_req, "invalid_category", 0);
2051 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2052 	CU_ASSERT(g_rpc_err == 1);
2053 	CU_ASSERT(g_get_raids_count == 0);
2054 
2055 	create_get_raids_req(&get_raids_req, "all", 1);
2056 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2057 	CU_ASSERT(g_rpc_err == 1);
2058 	free(get_raids_req.category);
2059 	CU_ASSERT(g_get_raids_count == 0);
2060 
2061 	create_get_raids_req(&get_raids_req, "all", 0);
2062 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2063 	CU_ASSERT(g_rpc_err == 0);
2064 	CU_ASSERT(g_get_raids_count == g_max_raids);
2065 	for (i = 0; i < g_get_raids_count; i++) {
2066 		free(g_get_raids_output[i]);
2067 	}
2068 
2069 	for (i = 0; i < g_max_raids; i++) {
2070 		SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
2071 		snprintf(name, 16, "%s", construct_req[i].name);
2072 		create_raid_bdev_delete_req(&destroy_req, name, 0);
2073 		rpc_bdev_raid_delete(NULL, NULL);
2074 		CU_ASSERT(g_rpc_err == 0);
2075 		verify_raid_bdev_present(name, false);
2076 	}
2077 	raid_bdev_exit();
2078 	for (i = 0; i < g_max_raids; i++) {
2079 		free_test_req(&construct_req[i]);
2080 	}
2081 	free(construct_req);
2082 	base_bdevs_cleanup();
2083 	reset_globals();
2084 }
2085 
2086 /* Create multiple raids and submit IO to each of them */
2087 static void
2088 test_multi_raid_with_io(void)
2089 {
2090 	struct rpc_bdev_raid_create *construct_req;
2091 	struct rpc_bdev_raid_delete destroy_req;
2092 	uint8_t i;
2093 	char name[16];
2094 	uint8_t bbdev_idx = 0;
2095 	struct raid_bdev *pbdev;
2096 	struct spdk_io_channel **channels;
2097 	struct spdk_bdev_io *bdev_io;
2098 	uint64_t io_len;
2099 	uint64_t lba = 0;
2100 	int16_t iotype;
2101 
2102 	set_globals();
2103 	construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create));
2104 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
2105 	CU_ASSERT(raid_bdev_init() == 0);
2106 	channels = calloc(g_max_raids, sizeof(*channels));
2107 	SPDK_CU_ASSERT_FATAL(channels != NULL);
2108 
2109 	for (i = 0; i < g_max_raids; i++) {
2110 		snprintf(name, 16, "%s%u", "raid", i);
2111 		verify_raid_bdev_present(name, false);
2112 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
2113 		bbdev_idx += g_max_base_drives;
2114 		rpc_bdev_raid_create(NULL, NULL);
2115 		CU_ASSERT(g_rpc_err == 0);
2116 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
2117 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2118 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
2119 				break;
2120 			}
2121 		}
2122 		SPDK_CU_ASSERT_FATAL(pbdev != NULL);
2123 
2124 		channels[i] = spdk_get_io_channel(pbdev);
2125 		SPDK_CU_ASSERT_FATAL(channels[i] != NULL);
2126 	}
2127 
2128 	/* This will perform a read on the first raid and a write on the second. It can be
2129 	 * expanded in the future to perform r/w on each raid device in the event that
2130 	 * multiple raid levels are supported.
2131 	 */
2132 	for (i = 0; i < g_max_raids; i++) {
2133 		struct spdk_io_channel *ch = channels[i];
2134 		struct raid_bdev_io_channel *ch_ctx = spdk_io_channel_get_ctx(ch);
2135 
2136 		SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
2137 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2138 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2139 		io_len = g_strip_size;
2140 		iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
2141 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2142 		g_io_output_index = 0;
2143 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2144 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
2145 				break;
2146 			}
2147 		}
2148 		SPDK_CU_ASSERT_FATAL(pbdev != NULL);
2149 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, iotype);
2150 		if (iotype == SPDK_BDEV_IO_TYPE_WRITE) {
2151 			generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
2152 				     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
2153 		}
2154 		raid_bdev_submit_request(ch, bdev_io);
2155 		verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
2156 			  g_child_io_status_flag);
2157 		bdev_io_cleanup(bdev_io);
2158 	}
2159 
2160 	for (i = 0; i < g_max_raids; i++) {
2161 		spdk_put_io_channel(channels[i]);
2162 		snprintf(name, 16, "%s", construct_req[i].name);
2163 		create_raid_bdev_delete_req(&destroy_req, name, 0);
2164 		rpc_bdev_raid_delete(NULL, NULL);
2165 		CU_ASSERT(g_rpc_err == 0);
2166 		verify_raid_bdev_present(name, false);
2167 	}
2168 	raid_bdev_exit();
2169 	for (i = 0; i < g_max_raids; i++) {
2170 		free_test_req(&construct_req[i]);
2171 	}
2172 	free(construct_req);
2173 	free(channels);
2174 	base_bdevs_cleanup();
2175 	reset_globals();
2176 }
2177 
2178 static void
2179 test_io_type_supported(void)
2180 {
2181 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
2182 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
2183 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
2184 }
2185 
2186 static void
2187 test_raid_json_dump_info(void)
2188 {
2189 	struct rpc_bdev_raid_create req;
2190 	struct rpc_bdev_raid_delete destroy_req;
2191 	struct raid_bdev *pbdev;
2192 
2193 	set_globals();
2194 	CU_ASSERT(raid_bdev_init() == 0);
2195 
2196 	verify_raid_bdev_present("raid1", false);
2197 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2198 	rpc_bdev_raid_create(NULL, NULL);
2199 	CU_ASSERT(g_rpc_err == 0);
2200 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2201 
2202 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2203 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2204 			break;
2205 		}
2206 	}
2207 	SPDK_CU_ASSERT_FATAL(pbdev != NULL);
2208 
2209 	CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
2210 
2211 	free_test_req(&req);
2212 
2213 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2214 	rpc_bdev_raid_delete(NULL, NULL);
2215 	CU_ASSERT(g_rpc_err == 0);
2216 	verify_raid_bdev_present("raid1", false);
2217 
2218 	raid_bdev_exit();
2219 	base_bdevs_cleanup();
2220 	reset_globals();
2221 }
2222 
2223 static void
2224 test_context_size(void)
2225 {
2226 	CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
2227 }
2228 
2229 static void
2230 test_raid_level_conversions(void)
2231 {
2232 	const char *raid_str;
2233 
2234 	CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL);
2235 	CU_ASSERT(raid_bdev_str_to_level("0") == RAID0);
2236 	CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0);
2237 	CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0);
2238 
2239 	raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
2240 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
2241 	raid_str = raid_bdev_level_to_str(1234);
2242 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
2243 	raid_str = raid_bdev_level_to_str(RAID0);
2244 	CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
2245 }
2246 
2247 static void
2248 test_create_raid_superblock(void)
2249 {
2250 	struct rpc_bdev_raid_create req;
2251 	struct rpc_bdev_raid_delete delete_req;
2252 
2253 	set_globals();
2254 	CU_ASSERT(raid_bdev_init() == 0);
2255 
2256 	verify_raid_bdev_present("raid1", false);
2257 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true);
2258 	rpc_bdev_raid_create(NULL, NULL);
2259 	CU_ASSERT(g_rpc_err == 0);
2260 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2261 	free_test_req(&req);
2262 
2263 	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
2264 	rpc_bdev_raid_delete(NULL, NULL);
2265 	CU_ASSERT(g_rpc_err == 0);
2266 	raid_bdev_exit();
2267 	base_bdevs_cleanup();
2268 	reset_globals();
2269 }
2270 
2271 static void
2272 complete_process_request(void *ctx)
2273 {
2274 	struct raid_bdev_process_request *process_req = ctx;
2275 
2276 	raid_bdev_process_request_complete(process_req, 0);
2277 }
2278 
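/* Minimal submit_process_request stub: it tallies the blocks handed to it in
 * the counter passed via module_private and completes each request
 * asynchronously with a message to the current thread.
 */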
2279 static int
2280 submit_process_request(struct raid_bdev_process_request *process_req,
2281 		       struct raid_bdev_io_channel *raid_ch)
2282 {
2283 	struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(raid_ch));
2284 
2285 	*(uint64_t *)raid_bdev->module_private += process_req->num_blocks;
2286 
2287 	spdk_thread_send_msg(spdk_get_thread(), complete_process_request, process_req);
2288 
2289 	return process_req->num_blocks;
2290 }
2291 
2292 static void
2293 test_raid_process(void)
2294 {
2295 	struct rpc_bdev_raid_create req;
2296 	struct rpc_bdev_raid_delete destroy_req;
2297 	struct raid_bdev *pbdev;
2298 	struct spdk_bdev *base_bdev;
2299 	struct spdk_thread *process_thread;
2300 	uint64_t num_blocks_processed = 0;
2301 
2302 	set_globals();
2303 	CU_ASSERT(raid_bdev_init() == 0);
2304 
2305 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2306 	verify_raid_bdev_present("raid1", false);
2307 	TAILQ_FOREACH(base_bdev, &g_bdev_list, internal.link) {
2308 		base_bdev->blockcnt = 128;
2309 	}
2310 	rpc_bdev_raid_create(NULL, NULL);
2311 	CU_ASSERT(g_rpc_err == 0);
2312 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2313 	free_test_req(&req);
2314 
2315 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2316 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2317 			break;
2318 		}
2319 	}
2320 	SPDK_CU_ASSERT_FATAL(pbdev != NULL);
2321 
2322 	pbdev->module->submit_process_request = submit_process_request;
2323 	pbdev->module_private = &num_blocks_processed;
2324 
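	/* Start a rebuild of the first base bdev; a dedicated process thread is
	 * created for it.
	 */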
2325 	CU_ASSERT(raid_bdev_start_rebuild(&pbdev->base_bdev_info[0]) == 0);
2326 	poll_app_thread();
2327 
2328 	SPDK_CU_ASSERT_FATAL(pbdev->process != NULL);
2329 
2330 	process_thread = g_latest_thread;
2331 	spdk_thread_poll(process_thread, 0, 0);
2332 	SPDK_CU_ASSERT_FATAL(pbdev->process->thread == process_thread);
2333 
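	/* Drive the process thread and the app thread in lockstep until the
	 * rebuild completes.
	 */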
2334 	while (spdk_thread_poll(process_thread, 0, 0) > 0) {
2335 		poll_app_thread();
2336 	}
2337 
2338 	CU_ASSERT(pbdev->process == NULL);
2339 	CU_ASSERT(num_blocks_processed == pbdev->bdev.blockcnt);
2340 
2341 	poll_app_thread();
2342 
2343 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2344 	rpc_bdev_raid_delete(NULL, NULL);
2345 	CU_ASSERT(g_rpc_err == 0);
2346 	verify_raid_bdev_present("raid1", false);
2347 
2348 	raid_bdev_exit();
2349 	base_bdevs_cleanup();
2350 	reset_globals();
2351 }
2352 
2353 static void
2354 test_raid_io_split(void)
2355 {
2356 	struct rpc_bdev_raid_create req;
2357 	struct rpc_bdev_raid_delete destroy_req;
2358 	struct raid_bdev *pbdev;
2359 	struct spdk_io_channel *ch;
2360 	struct raid_bdev_io_channel *raid_ch;
2361 	struct spdk_bdev_io *bdev_io;
2362 	struct raid_bdev_io *raid_io;
2363 	uint64_t split_offset;
2364 	struct iovec iovs_orig[4];
2365 	struct raid_bdev_process process = { };
2366 
2367 	set_globals();
2368 	CU_ASSERT(raid_bdev_init() == 0);
2369 
2370 	verify_raid_bdev_present("raid1", false);
2371 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2372 	rpc_bdev_raid_create(NULL, NULL);
2373 	CU_ASSERT(g_rpc_err == 0);
2374 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2375 
2376 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2377 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2378 			break;
2379 		}
2380 	}
2381 	SPDK_CU_ASSERT_FATAL(pbdev != NULL);
2382 	pbdev->bdev.md_len = 8;
2383 
2384 	process.raid_bdev = pbdev;
2385 	process.target = &pbdev->base_bdev_info[0];
2386 	pbdev->process = &process;
2387 	ch = spdk_get_io_channel(pbdev);
2388 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2389 	raid_ch = spdk_io_channel_get_ctx(ch);
2390 	g_bdev_io_defer_completion = true;
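
	/* raid_ch->process.offset marks the split boundary: blocks at and above it
	 * are submitted first, and only after they complete is the remainder below
	 * the boundary submitted. Deferring child IO completions lets each half be
	 * inspected before it completes.
	 */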
2391 
2392 	/* test split of bdev_io with 1 iovec */
2393 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2394 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2395 	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
2396 	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE);
2397 	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
2398 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2399 	g_io_output_index = 0;
2400 
2401 	split_offset = 1;
2402 	raid_ch->process.offset = split_offset;
2403 	generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
2404 		     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
2405 	raid_bdev_submit_request(ch, bdev_io);
2406 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2407 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2408 	CU_ASSERT(raid_io->iovcnt == 1);
2409 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2410 	CU_ASSERT(raid_io->iovs == raid_io->split.iov);
2411 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base + split_offset * g_block_len);
2412 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len - split_offset * g_block_len);
2413 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2414 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2415 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2416 	}
2417 	complete_deferred_ios();
2418 	CU_ASSERT(raid_io->num_blocks == split_offset);
2419 	CU_ASSERT(raid_io->offset_blocks == 0);
2420 	CU_ASSERT(raid_io->iovcnt == 1);
2421 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
2422 	CU_ASSERT(raid_io->iovs[0].iov_len == split_offset * g_block_len);
2423 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2424 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2425 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2426 	}
2427 	complete_deferred_ios();
2428 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2429 	CU_ASSERT(raid_io->offset_blocks == 0);
2430 	CU_ASSERT(raid_io->iovcnt == 1);
2431 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
2432 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len);
2433 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2434 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2435 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2436 	}
2437 
2438 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2439 	CU_ASSERT(g_io_output_index == 2);
2440 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2441 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2442 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2443 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2444 	bdev_io_cleanup(bdev_io);
2445 
2446 	/* test split of bdev_io with 4 iovecs */
2447 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2448 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2449 	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
2450 	_bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE,
2451 			    4, g_strip_size / 4 * g_block_len);
2452 	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
2453 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2454 	g_io_output_index = 0;
2455 
2456 	split_offset = 1; /* split at the first iovec */
2457 	raid_ch->process.offset = split_offset;
2458 	generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
2459 		     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
2460 	raid_bdev_submit_request(ch, bdev_io);
2461 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2462 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2463 	CU_ASSERT(raid_io->iovcnt == 4);
2464 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[0]);
2465 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[0]);
2466 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base + g_block_len);
2467 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[0].iov_len - g_block_len);
2468 	CU_ASSERT(memcmp(raid_io->iovs + 1, iovs_orig + 1, sizeof(*iovs_orig) * 3) == 0);
2469 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2470 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2471 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2472 	}
2473 	complete_deferred_ios();
2474 	CU_ASSERT(raid_io->num_blocks == split_offset);
2475 	CU_ASSERT(raid_io->offset_blocks == 0);
2476 	CU_ASSERT(raid_io->iovcnt == 1);
2477 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2478 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base);
2479 	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
2480 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2481 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2482 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2483 	}
2484 	complete_deferred_ios();
2485 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2486 	CU_ASSERT(raid_io->offset_blocks == 0);
2487 	CU_ASSERT(raid_io->iovcnt == 4);
2488 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2489 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2490 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2491 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2492 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2493 	}
2494 
2495 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2496 	CU_ASSERT(g_io_output_index == 2);
2497 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2498 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2499 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2500 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2501 
2502 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2503 	g_io_output_index = 0;
2504 
2505 	split_offset = g_strip_size / 2; /* split exactly between second and third iovec */
2506 	raid_ch->process.offset = split_offset;
2507 	raid_bdev_submit_request(ch, bdev_io);
2508 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2509 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2510 	CU_ASSERT(raid_io->iovcnt == 2);
2511 	CU_ASSERT(raid_io->split.iov == NULL);
2512 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
2513 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig + 2, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2514 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2515 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2516 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2517 	}
2518 	complete_deferred_ios();
2519 	CU_ASSERT(raid_io->num_blocks == split_offset);
2520 	CU_ASSERT(raid_io->offset_blocks == 0);
2521 	CU_ASSERT(raid_io->iovcnt == 2);
2522 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2523 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2524 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2525 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2526 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2527 	}
2528 	complete_deferred_ios();
2529 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2530 	CU_ASSERT(raid_io->offset_blocks == 0);
2531 	CU_ASSERT(raid_io->iovcnt == 4);
2532 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2533 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2534 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2535 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2536 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2537 	}
2538 
2539 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2540 	CU_ASSERT(g_io_output_index == 2);
2541 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2542 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2543 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2544 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2545 
2546 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2547 	g_io_output_index = 0;
2548 
2549 	split_offset = g_strip_size / 2 + 1; /* split at the third iovec */
2550 	raid_ch->process.offset = split_offset;
2551 	raid_bdev_submit_request(ch, bdev_io);
2552 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2553 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2554 	CU_ASSERT(raid_io->iovcnt == 2);
2555 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[2]);
2556 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
2557 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[2].iov_base + g_block_len);
2558 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[2].iov_len - g_block_len);
2559 	CU_ASSERT(raid_io->iovs[1].iov_base == iovs_orig[3].iov_base);
2560 	CU_ASSERT(raid_io->iovs[1].iov_len == iovs_orig[3].iov_len);
2561 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2562 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2563 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2564 	}
2565 	complete_deferred_ios();
2566 	CU_ASSERT(raid_io->num_blocks == split_offset);
2567 	CU_ASSERT(raid_io->offset_blocks == 0);
2568 	CU_ASSERT(raid_io->iovcnt == 3);
2569 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2570 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 2) == 0);
2571 	CU_ASSERT(raid_io->iovs[2].iov_base == iovs_orig[2].iov_base);
2572 	CU_ASSERT(raid_io->iovs[2].iov_len == g_block_len);
2573 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2574 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2575 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2576 	}
2577 	complete_deferred_ios();
2578 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2579 	CU_ASSERT(raid_io->offset_blocks == 0);
2580 	CU_ASSERT(raid_io->iovcnt == 4);
2581 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2582 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2583 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2584 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2585 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2586 	}
2587 
2588 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2589 	CU_ASSERT(g_io_output_index == 2);
2590 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2591 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2592 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2593 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2594 
2595 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2596 	g_io_output_index = 0;
2597 
2598 	split_offset = g_strip_size - 1; /* split at the last iovec */
2599 	raid_ch->process.offset = split_offset;
2600 	raid_bdev_submit_request(ch, bdev_io);
2601 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2602 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2603 	CU_ASSERT(raid_io->iovcnt == 1);
2604 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[3]);
2605 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[3]);
2606 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[3].iov_base + iovs_orig[3].iov_len - g_block_len);
2607 	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
2608 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2609 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2610 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2611 	}
2612 	complete_deferred_ios();
2613 	CU_ASSERT(raid_io->num_blocks == split_offset);
2614 	CU_ASSERT(raid_io->offset_blocks == 0);
2615 	CU_ASSERT(raid_io->iovcnt == 4);
2616 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2617 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 3) == 0);
2618 	CU_ASSERT(raid_io->iovs[3].iov_base == iovs_orig[3].iov_base);
2619 	CU_ASSERT(raid_io->iovs[3].iov_len == iovs_orig[3].iov_len - g_block_len);
2620 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2621 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2622 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2623 	}
2624 	complete_deferred_ios();
2625 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2626 	CU_ASSERT(raid_io->offset_blocks == 0);
2627 	CU_ASSERT(raid_io->iovcnt == 4);
2628 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2629 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2630 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2631 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2632 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2633 	}
2634 
2635 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2636 	CU_ASSERT(g_io_output_index == 2);
2637 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2638 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2639 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2640 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2641 	bdev_io_cleanup(bdev_io);
2642 
2643 	spdk_put_io_channel(ch);
2644 	free_test_req(&req);
2645 	pbdev->process = NULL;
2646 
2647 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2648 	rpc_bdev_raid_delete(NULL, NULL);
2649 	CU_ASSERT(g_rpc_err == 0);
2650 	verify_raid_bdev_present("raid1", false);
2651 
2652 	raid_bdev_exit();
2653 	base_bdevs_cleanup();
2654 	reset_globals();
2655 }
2656 
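/* Thread-creation hook: records the most recently created SPDK thread so
 * tests (e.g. test_raid_process) can locate the raid process thread.
 */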
2657 static int
2658 test_new_thread_fn(struct spdk_thread *thread)
2659 {
2660 	g_latest_thread = thread;
2661 
2662 	return 0;
2663 }
2664 
2665 static int
2666 test_bdev_ioch_create(void *io_device, void *ctx_buf)
2667 {
2668 	return 0;
2669 }
2670 
2671 static void
2672 test_bdev_ioch_destroy(void *io_device, void *ctx_buf)
2673 {
2674 }
2675 
2676 int
2677 main(int argc, char **argv)
2678 {
2679 	unsigned int    num_failures;
2680 
2681 	CU_TestInfo tests[] = {
2682 		{ "test_create_raid", test_create_raid },
2683 		{ "test_create_raid_superblock", test_create_raid_superblock },
2684 		{ "test_delete_raid", test_delete_raid },
2685 		{ "test_create_raid_invalid_args", test_create_raid_invalid_args },
2686 		{ "test_delete_raid_invalid_args", test_delete_raid_invalid_args },
2687 		{ "test_io_channel", test_io_channel },
2688 		{ "test_reset_io", test_reset_io },
2689 		{ "test_write_io", test_write_io },
2690 		{ "test_read_io", test_read_io },
2691 		{ "test_unmap_io", test_unmap_io },
2692 		{ "test_io_failure", test_io_failure },
2693 		{ "test_multi_raid_no_io", test_multi_raid_no_io },
2694 		{ "test_multi_raid_with_io", test_multi_raid_with_io },
2695 		{ "test_io_type_supported", test_io_type_supported },
2696 		{ "test_raid_json_dump_info", test_raid_json_dump_info },
2697 		{ "test_context_size", test_context_size },
2698 		{ "test_raid_level_conversions", test_raid_level_conversions },
2699 		{ "test_raid_io_split", test_raid_io_split },
2700 		{ "test_raid_process", test_raid_process },
2701 		CU_TEST_INFO_NULL,
2702 	};
2703 	CU_SuiteInfo suites[] = {
2704 		{ "raid", set_test_opts, NULL, NULL, NULL, tests },
2705 		{ "raid_dif", set_test_opts_dif, NULL, NULL, NULL, tests },
2706 		CU_SUITE_INFO_NULL,
2707 	};
2708 
2709 	CU_initialize_registry();
2710 	CU_register_suites(suites);
2711 
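	/* Set up a minimal SPDK threading environment: an application thread plus
	 * the dummy io_device backing the base bdev channels used by these tests.
	 */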
2712 	spdk_thread_lib_init(test_new_thread_fn, 0);
2713 	g_app_thread = spdk_thread_create("app_thread", NULL);
2714 	spdk_set_thread(g_app_thread);
2715 	spdk_io_device_register(&g_bdev_ch_io_device, test_bdev_ioch_create, test_bdev_ioch_destroy, 0,
2716 				NULL);
2717 
2718 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2719 	CU_cleanup_registry();
2720 
2721 	spdk_io_device_unregister(&g_bdev_ch_io_device, NULL);
2722 	spdk_thread_exit(g_app_thread);
2723 	spdk_thread_poll(g_app_thread, 0, 0);
2724 	spdk_thread_destroy(g_app_thread);
2725 	spdk_thread_lib_fini();
2726 
2727 	return num_failures;
2728 }
2729