/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "bdev/raid/bdev_raid.c"
#include "bdev/raid/bdev_raid_rpc.c"
#include "bdev/raid/raid0.c"
#include "common/lib/ut_multithread.c"

#define MAX_BASE_DRIVES 32
#define MAX_RAIDS 2
#define INVALID_IO_SUBMIT 0xFFFF
#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)
#define MD_SIZE 8

struct spdk_bdev_channel {
	struct spdk_io_channel *channel;
};

struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};

/* Data structure to capture the output of IO for verification */
struct io_output {
	struct spdk_bdev_desc       *desc;
	struct spdk_io_channel      *ch;
	uint64_t                    offset_blocks;
	uint64_t                    num_blocks;
	spdk_bdev_io_completion_cb  cb;
	void                        *cb_arg;
	enum spdk_bdev_io_type      iotype;
	struct iovec                *iovs;
	int                         iovcnt;
	void                        *md_buf;
};

struct raid_io_ranges {
	uint64_t lba;
	uint64_t nblocks;
};

/* Globals */
int g_bdev_io_submit_status;
struct io_output *g_io_output = NULL;
uint32_t g_io_output_index;
uint32_t g_io_comp_status;
bool g_child_io_status_flag;
void *g_rpc_req;
uint32_t g_rpc_req_size;
TAILQ_HEAD(bdev, spdk_bdev);
struct bdev g_bdev_list;
TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
struct waitq g_io_waitq;
uint32_t g_block_len;
uint32_t g_strip_size;
uint32_t g_max_io_size;
uint8_t g_max_base_drives;
uint8_t g_max_raids;
uint8_t g_ignore_io_output;
uint8_t g_rpc_err;
char *g_get_raids_output[MAX_RAIDS];
uint32_t g_get_raids_count;
uint8_t g_json_decode_obj_err;
uint8_t g_json_decode_obj_create;
uint8_t g_config_level_create = 0;
uint8_t g_test_multi_raids;
struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
uint32_t g_io_range_idx;
uint64_t g_lba_offset;
uint64_t g_bdev_ch_io_device;
bool g_bdev_io_defer_completion;
TAILQ_HEAD(, spdk_bdev_io) g_deferred_ios = TAILQ_HEAD_INITIALIZER(g_deferred_ios);
bool g_enable_dif;

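/*
 * Stub out external dependencies (bdev module registration, JSON-RPC and JSON
 * write APIs, superblock helpers) so that bdev_raid.c, bdev_raid_rpc.c and
 * raid0.c can be linked and exercised without the real bdev and RPC layers.
 */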
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp),
	    NULL);
DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
		uint32_t state_mask));
DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
					struct spdk_json_write_ctx *w));
DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request,
		bool value));
DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values,
		spdk_json_decode_fn decode_func,
		void *out, size_t max_size, size_t *out_size, size_t stride), 0);
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
		uint64_t val), 0);
DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev,
		struct spdk_memory_domain **domains, int array_size), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev");
DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0);
DEFINE_STUB_V(raid_bdev_init_superblock, (struct raid_bdev *raid_bdev));
DEFINE_STUB(raid_bdev_alloc_superblock, int, (struct raid_bdev *raid_bdev, uint32_t block_size), 0);
DEFINE_STUB_V(raid_bdev_free_superblock, (struct raid_bdev *raid_bdev));

uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	return g_block_len;
}

typedef enum spdk_dif_type spdk_dif_type_t;

spdk_dif_type_t
spdk_bdev_get_dif_type(const struct spdk_bdev *bdev)
{
	if (bdev->md_len != 0) {
		return bdev->dif_type;
	} else {
		return SPDK_DIF_DISABLE;
	}
}

bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

bool
spdk_bdev_is_md_separate(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && !bdev->md_interleave;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

int
raid_bdev_load_base_bdev_superblock(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				    raid_bdev_load_sb_cb cb, void *cb_ctx)
{
	cb(NULL, -EINVAL, cb_ctx);

	return 0;
}

void
raid_bdev_write_superblock(struct raid_bdev *raid_bdev, raid_bdev_write_sb_cb cb, void *cb_ctx)
{
	cb(0, raid_bdev, cb_ctx);
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
{
	return spdk_get_io_channel(&g_bdev_ch_io_device);
}

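/*
 * Configure the global test options shared by every test case: block length,
 * strip size, maximum IO size and drive counts. The two suite variants run
 * without and with DIF enabled.
 */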
static int
set_test_opts_common(bool enable_dif)
{
	g_max_base_drives = MAX_BASE_DRIVES;
	g_max_raids = MAX_RAIDS;
	g_block_len = 4096;
	g_strip_size = 64;
	g_max_io_size = 1024;
	g_enable_dif = enable_dif;

	printf("Test Options\n");
	printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
	       "g_max_raids = %u, g_enable_dif = %d\n",
	       g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids,
	       g_enable_dif);

	return 0;
}

static int
set_test_opts(void)
{
	return set_test_opts_common(false);
}

static int
set_test_opts_dif(void)
{
	return set_test_opts_common(true);
}

/* Set globals before every test run */
static void
set_globals(void)
{
	uint32_t max_splits;

	g_bdev_io_submit_status = 0;
	if (g_max_io_size < g_strip_size) {
		max_splits = 2;
	} else {
		max_splits = (g_max_io_size / g_strip_size) + 1;
	}
	if (max_splits < g_max_base_drives) {
		max_splits = g_max_base_drives;
	}

	g_io_output = calloc(max_splits, sizeof(struct io_output));
	SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
	g_io_output_index = 0;
	memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
	g_get_raids_count = 0;
	g_io_comp_status = 0;
	g_ignore_io_output = 0;
	g_config_level_create = 0;
	g_rpc_err = 0;
	g_test_multi_raids = 0;
	g_child_io_status_flag = true;
	TAILQ_INIT(&g_bdev_list);
	TAILQ_INIT(&g_io_waitq);
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
	g_json_decode_obj_err = 0;
	g_json_decode_obj_create = 0;
	g_lba_offset = 0;
	g_bdev_io_defer_completion = false;
}

static void
base_bdevs_cleanup(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev *bdev_next;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
			free(bdev->name);
			TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
			free(bdev);
		}
	}
}

static void
check_and_remove_raid_bdev(struct raid_bdev *raid_bdev)
{
	struct raid_base_bdev_info *base_info;

	assert(raid_bdev != NULL);
	assert(raid_bdev->base_bdev_info != NULL);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		if (base_info->desc) {
			raid_bdev_free_base_bdev_resource(base_info);
		}
	}
	assert(raid_bdev->num_base_bdevs_discovered == 0);
	raid_bdev_cleanup_and_free(raid_bdev);
}

/* Reset globals */
static void
reset_globals(void)
{
	if (g_io_output) {
		free(g_io_output);
		g_io_output = NULL;
	}
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
		     uint64_t len)
{
	cb(bdev_io->internal.ch->channel, bdev_io, true);
}

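/*
 * Compute protection information for the data in iovs. Only separate metadata
 * (DIX) is generated here: the base bdevs created by these tests always set
 * md_interleave to false, so the interleaved branch is never taken.
 */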
static void
generate_dif(struct iovec *iovs, int iovcnt, void *md_buf,
	     uint64_t offset_blocks, uint32_t num_blocks, struct spdk_bdev *bdev)
{
	struct spdk_dif_ctx dif_ctx;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	spdk_dif_type_t dif_type;
	bool md_interleaved;
	struct iovec md_iov;

	dif_type = spdk_bdev_get_dif_type(bdev);
	md_interleaved = spdk_bdev_is_md_interleaved(bdev);

	if (dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       md_interleaved,
			       spdk_bdev_is_dif_head_of_md(bdev),
			       dif_type,
			       bdev->dif_check_flags,
			       offset_blocks,
			       0xFFFF, 0x123, 0, 0, &dif_opts);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	if (!md_interleaved) {
		md_iov.iov_base = md_buf;
		md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks;

		rc = spdk_dix_generate(iovs, iovcnt, &md_iov, num_blocks, &dif_ctx);
		SPDK_CU_ASSERT_FATAL(rc == 0);
	}
}

static void
verify_dif(struct iovec *iovs, int iovcnt, void *md_buf,
	   uint64_t offset_blocks, uint32_t num_blocks, struct spdk_bdev *bdev)
{
	struct spdk_dif_ctx dif_ctx;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	struct spdk_dif_error errblk;
	spdk_dif_type_t dif_type;
	bool md_interleaved;
	struct iovec md_iov;

	dif_type = spdk_bdev_get_dif_type(bdev);
	md_interleaved = spdk_bdev_is_md_interleaved(bdev);

	if (dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       md_interleaved,
			       spdk_bdev_is_dif_head_of_md(bdev),
			       dif_type,
			       bdev->dif_check_flags,
			       offset_blocks,
			       0xFFFF, 0x123, 0, 0, &dif_opts);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	if (!md_interleaved) {
		md_iov.iov_base = md_buf;
		md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks;

		rc = spdk_dix_verify(iovs, iovcnt,
				     &md_iov, num_blocks, &dif_ctx, &errblk);
		SPDK_CU_ASSERT_FATAL(rc == 0);
	}
}

/* Store the IO completion status in a global variable so tests can verify it */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	g_io_comp_status = (status == SPDK_BDEV_IO_STATUS_SUCCESS);

	if (g_io_comp_status && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		verify_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
			   bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
	}
}

static void
set_io_output(struct io_output *output,
	      struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	      uint64_t offset_blocks, uint64_t num_blocks,
	      spdk_bdev_io_completion_cb cb, void *cb_arg,
	      enum spdk_bdev_io_type iotype, struct iovec *iovs,
	      int iovcnt, void *md)
{
	output->desc = desc;
	output->ch = ch;
	output->offset_blocks = offset_blocks;
	output->num_blocks = num_blocks;
	output->cb = cb;
	output->cb_arg = cb_arg;
	output->iotype = iotype;
	output->iovs = iovs;
	output->iovcnt = iovcnt;
	output->md_buf = md;
}

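/*
 * Complete a child IO immediately with g_child_io_status_flag, or park it on
 * g_deferred_ios when deferred completion is enabled; complete_deferred_ios()
 * later flushes that queue.
 */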
static void
child_io_complete(struct spdk_bdev_io *child_io, spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_bdev_io_defer_completion) {
		child_io->internal.cb = cb;
		child_io->internal.caller_ctx = cb_arg;
		TAILQ_INSERT_TAIL(&g_deferred_ios, child_io, internal.link);
	} else {
		cb(child_io, g_child_io_status_flag, cb_arg);
	}
}

static void
complete_deferred_ios(void)
{
	struct spdk_bdev_io *child_io, *tmp;

	TAILQ_FOREACH_SAFE(child_io, &g_deferred_ios, internal.link, tmp) {
		TAILQ_REMOVE(&g_deferred_ios, child_io, internal.link);
		child_io->internal.cb(child_io, g_child_io_status_flag, child_io->internal.caller_ctx);
	}
}

/* Cache the split IOs so tests can verify them */
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_ext(desc, ch, iov, iovcnt, offset_blocks,
					   num_blocks, cb, cb_arg, NULL);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt,
			    uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	void *md = opts ? opts->metadata : NULL;
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_max_io_size < g_strip_size) {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
	} else {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
	}
	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_WRITE, iov, iovcnt, md);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev_ext_io_opts opts = {
		.metadata = md
	};

	return spdk_bdev_writev_blocks_ext(desc, ch, iov, iovcnt, offset_blocks,
					   num_blocks, cb, cb_arg, &opts);
}

int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET,
			      NULL, 0, NULL);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_UNMAP, NULL, 0, NULL);
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

void
spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
{
	CU_ASSERT(bdeverrno == 0);
	SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL);
	bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
	return 0;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int ret;

	SPDK_CU_ASSERT_FATAL(spdk_bdev_get_by_name(bdev->name) == bdev);
	TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);

	bdev->internal.unregister_cb = cb_fn;
	bdev->internal.unregister_ctx = cb_arg;

	ret = bdev->fn_table->destruct(bdev->ctxt);
	CU_ASSERT(ret == 1);

	poll_threads();
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
		   void *event_ctx, struct spdk_bdev_desc **_desc)
{
	struct spdk_bdev *bdev;

	bdev = spdk_bdev_get_by_name(bdev_name);
	if (bdev == NULL) {
		return -ENODEV;
	}

	*_desc = (void *)bdev;
	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (void *)desc;
}

int
spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;

		if (strcmp(name, "strip_size_kb") == 0) {
			CU_ASSERT(req->strip_size_kb == val);
		} else if (strcmp(name, "blocklen_shift") == 0) {
			CU_ASSERT(spdk_u32log2(g_block_len) == val);
		} else if (strcmp(name, "num_base_bdevs") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		} else if (strcmp(name, "state") == 0) {
			CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
		} else if (strcmp(name, "destruct_called") == 0) {
			CU_ASSERT(val == 0);
		} else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		}
	}
	return 0;
}

int
spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
{
	if (g_test_multi_raids) {
		if (strcmp(name, "name") == 0) {
			g_get_raids_output[g_get_raids_count] = strdup(val);
			SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
			g_get_raids_count++;
		}
	} else {
		struct rpc_bdev_raid_create *req = g_rpc_req;

		if (strcmp(name, "raid_level") == 0) {
			CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0);
		}
	}
	return 0;
}

int
spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;

		if (strcmp(name, "superblock") == 0) {
			CU_ASSERT(val == req->superblock_enabled);
		}
	}
	return 0;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

/* Cache the split IOs so tests can verify them */
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_ext(desc, ch, iov, iovcnt, offset_blocks,
					  num_blocks, cb, cb_arg, NULL);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt,
			   uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	void *md = opts ? opts->metadata : NULL;
	struct spdk_bdev_io *child_io;

	if (g_ignore_io_output) {
		return 0;
	}

	SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
	if (g_bdev_io_submit_status == 0) {
		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
			      SPDK_BDEV_IO_TYPE_READ, iov, iovcnt, md);
		generate_dif(iov, iovcnt, md, offset_blocks, num_blocks,
			     spdk_bdev_desc_get_bdev(desc));
		g_io_output_index++;

		child_io = calloc(1, sizeof(struct spdk_bdev_io));
		SPDK_CU_ASSERT_FATAL(child_io != NULL);
		child_io_complete(child_io, cb, cb_arg);
	}

	return g_bdev_io_submit_status;
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev_ext_io_opts opts = {
		.metadata = md
	};

	return spdk_bdev_readv_blocks_ext(desc, ch, iov, iovcnt, offset_blocks,
					  num_blocks, cb, cb_arg, &opts);
}

void
spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
{
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module != NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
	bdev->internal.claim.v1.module = NULL;
}

int
spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_bdev_module *module)
{
	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
		CU_ASSERT(bdev->internal.claim.v1.module != NULL);
		return -1;
	}
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
	bdev->internal.claim.v1.module = module;
	return 0;
}

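/*
 * Fake JSON object decoding: instead of parsing JSON values, copy the request
 * prepared by the test (g_rpc_req) into the RPC handler's output structure,
 * field by field for create requests and as a raw memcpy otherwise.
 */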
int
spdk_json_decode_object(const struct spdk_json_val *values,
			const struct spdk_json_object_decoder *decoders, size_t num_decoders,
			void *out)
{
	struct rpc_bdev_raid_create *req, *_out;
	size_t i;

	if (g_json_decode_obj_err) {
		return -1;
	} else if (g_json_decode_obj_create) {
		req = g_rpc_req;
		_out = out;

		_out->name = strdup(req->name);
		SPDK_CU_ASSERT_FATAL(_out->name != NULL);
		_out->strip_size_kb = req->strip_size_kb;
		_out->level = req->level;
		_out->superblock_enabled = req->superblock_enabled;
		_out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
		for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
			_out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
			SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i] != NULL);
		}
	} else {
		memcpy(out, g_rpc_req, g_rpc_req_size);
	}

	return 0;
}

struct spdk_json_write_ctx *
spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
{
	return (void *)1;
}

void
spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
				 int error_code, const char *msg)
{
	g_rpc_err = 1;
}

void
spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
				     int error_code, const char *fmt, ...)
{
	g_rpc_err = 1;
}

struct spdk_bdev *
spdk_bdev_get_by_name(const char *bdev_name)
{
	struct spdk_bdev *bdev;

	TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
		if (strcmp(bdev_name, bdev->name) == 0) {
			return bdev;
		}
	}

	return NULL;
}

int
spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		    spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			uint64_t offset, uint64_t length,
			spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			  uint64_t offset, uint64_t length,
			  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

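/*
 * Helpers to build and tear down a bdev_io by hand: allocate iovecs sized to
 * match the requested block count and, for DIF with separate metadata, a
 * metadata buffer to go with them.
 */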
static void
bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->u.bdev.iovs) {
		int i;

		for (i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
			free(bdev_io->u.bdev.iovs[i].iov_base);
		}
		free(bdev_io->u.bdev.iovs);
	}

	free(bdev_io->u.bdev.md_buf);
	free(bdev_io);
}

static void
_bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch,
		    struct spdk_bdev *bdev, uint64_t lba, uint64_t blocks, int16_t iotype,
		    int iovcnt, size_t iov_len)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
	int i;

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;
	bdev_io->internal.ch = channel;
	bdev_io->u.bdev.iovcnt = iovcnt;

	if (iovcnt == 0) {
		bdev_io->u.bdev.iovs = NULL;
		bdev_io->u.bdev.md_buf = NULL;
		return;
	}

	SPDK_CU_ASSERT_FATAL(iov_len * iovcnt == blocks * g_block_len);

	bdev_io->u.bdev.iovs = calloc(iovcnt, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);

	for (i = 0; i < iovcnt; i++) {
		struct iovec *iov = &bdev_io->u.bdev.iovs[i];

		iov->iov_base = calloc(1, iov_len);
		SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL);
		iov->iov_len = iov_len;
	}

	if (spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE && !spdk_bdev_is_md_interleaved(bdev)) {
		bdev_io->u.bdev.md_buf = calloc(1, blocks * spdk_bdev_get_md_size(bdev));
		SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.md_buf != NULL);
	}
}

static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	int iovcnt;
	size_t iov_len;

	if (iotype == SPDK_BDEV_IO_TYPE_UNMAP || iotype == SPDK_BDEV_IO_TYPE_FLUSH) {
		iovcnt = 0;
		iov_len = 0;
	} else {
		iovcnt = 1;
		iov_len = blocks * g_block_len;
	}

	_bdev_io_initialize(bdev_io, ch, bdev, lba, blocks, iotype, iovcnt, iov_len);
}

static void
verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
		struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint8_t index = 0;
	struct io_output *output;

	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT);
	SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);

	CU_ASSERT(g_io_output_index == num_base_drives);
	for (index = 0; index < g_io_output_index; index++) {
		output = &g_io_output[index];
		CU_ASSERT(ch_ctx->base_channel[index] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

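/*
 * Verify RAID0 striping for IOs with payload: strip number 'strip' maps to
 * base bdev strip % num_base_drives at pd_lba = (strip / num_base_drives)
 * << strip_shift, adjusted by the offset within the first and last strips.
 * For example, with the default options (g_strip_size = 64, so strip_shift
 * is 6) an IO starting at LBA 100 begins in strip 1, i.e. on base bdev 1 at
 * pd_lba 36 (offset 36 within strip 0 of that drive).
 */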
static void
verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
	  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint32_t splits_reqd = (end_strip - start_strip + 1);
	uint32_t strip;
	uint64_t pd_strip;
	uint8_t pd_idx;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint32_t index = 0;
	struct io_output *output;

	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);

	CU_ASSERT(splits_reqd == g_io_output_index);
	for (strip = start_strip; strip <= end_strip; strip++, index++) {
		pd_strip = strip / num_base_drives;
		pd_idx = strip % num_base_drives;
		if (strip == start_strip) {
			offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
			pd_lba = (pd_strip << strip_shift) + offset_in_strip;
			if (strip == end_strip) {
				pd_blocks = bdev_io->u.bdev.num_blocks;
			} else {
				pd_blocks = g_strip_size - offset_in_strip;
			}
		} else if (strip == end_strip) {
			pd_lba = pd_strip << strip_shift;
			pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
				     (g_strip_size - 1)) + 1;
		} else {
			pd_lba = pd_strip << raid_bdev->strip_size_shift;
			pd_blocks = raid_bdev->strip_size;
		}
		output = &g_io_output[index];
		CU_ASSERT(pd_lba == output->offset_blocks);
		CU_ASSERT(pd_blocks == output->num_blocks);
		CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
		if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
			verify_dif(output->iovs, output->iovcnt, output->md_buf,
				   output->offset_blocks, output->num_blocks,
				   spdk_bdev_desc_get_bdev(raid_bdev->base_bdev_info[pd_idx].desc));
		}
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

static void
verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
			  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev,
			  uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size;
	uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) %
				       g_strip_size;
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint8_t n_disks_involved;
	uint64_t start_strip_disk_idx;
	uint64_t end_strip_disk_idx;
	uint64_t nblocks_in_start_disk;
	uint64_t offset_in_start_disk;
	uint8_t disk_idx;
	uint64_t base_io_idx;
	uint64_t sum_nblocks = 0;
	struct io_output *output;

	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE);

	n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
	CU_ASSERT(n_disks_involved == g_io_output_index);

	start_strip_disk_idx = start_strip % num_base_drives;
	end_strip_disk_idx = end_strip % num_base_drives;

	offset_in_start_disk = g_io_output[0].offset_blocks;
	nblocks_in_start_disk = g_io_output[0].num_blocks;

	for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
	     base_io_idx++, disk_idx++) {
		uint64_t start_offset_in_disk;
		uint64_t end_offset_in_disk;

		output = &g_io_output[base_io_idx];

		/* Wrap disk_idx around the number of base drives */
		if (disk_idx >= num_base_drives) {
			disk_idx %= num_base_drives;
		}

		/* Check that start_offset_in_disk is strip-aligned:
		 * the first base IO has the same start_offset_in_strip as the
		 * whole RAID IO; every other base IO must start on a strip
		 * boundary, i.e. at offset 0 within its strip.
		 */
		start_offset_in_disk = output->offset_blocks;
		if (base_io_idx == 0) {
			CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
		} else {
			CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
		}

		/* Check that end_offset_in_disk is strip-aligned:
		 * the base IO on the disk holding end_strip has the same
		 * end_offset_in_strip as the whole RAID IO; every other base IO
		 * must end on the last block of its strip.
		 */
		end_offset_in_disk = output->offset_blocks + output->num_blocks - 1;
		if (disk_idx == end_strip_disk_idx) {
			CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
		} else {
			CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
		}

		/* Compare start_offset_in_disk with the start disk's offset:
		 * 1. For disk_idx greater than start_strip_disk_idx,
		 *    start_offset_in_disk must not exceed the start disk's offset,
		 *    and the gap must be less than the strip size.
		 * 2. For disk_idx less than start_strip_disk_idx,
		 *    start_offset_in_disk must be greater than the start disk's
		 *    offset, and the gap must not exceed the strip size.
		 */
		if (disk_idx > start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
			CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
		} else if (disk_idx < start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
			CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size);
		}

		/* Compare num_blocks with the start disk's block count:
		 * the difference between them must be within one strip size.
		 */
		if (output->num_blocks <= nblocks_in_start_disk) {
			CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size);
		} else {
			CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size);
		}

		sum_nblocks += output->num_blocks;

		CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}

	/* The base IO block counts must sum to the RAID bdev_io's num_blocks */
	CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks);

	CU_ASSERT(g_io_comp_status == io_status);
}

static void
verify_raid_bdev_present(const char *name, bool presence)
{
	struct raid_bdev *pbdev;
	bool pbdev_found;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, name) == 0) {
			pbdev_found = true;
			break;
		}
	}
	CU_ASSERT(pbdev_found == presence);
}

static void
verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state)
{
	struct raid_bdev *pbdev;
	struct raid_base_bdev_info *base_info;
	struct spdk_bdev *bdev = NULL;
	bool pbdev_found;
	uint64_t min_blockcnt = UINT64_MAX;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, r->name) == 0) {
			pbdev_found = true;
			if (presence == false) {
				break;
			}
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len));
			CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) /
					g_block_len)));
			CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len));
			CU_ASSERT((uint32_t)pbdev->state == raid_state);
			CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->level == r->level);
			RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) {
				CU_ASSERT(base_info->desc != NULL);
				bdev = spdk_bdev_desc_get_bdev(base_info->desc);
				CU_ASSERT(bdev != NULL);
				CU_ASSERT(base_info->remove_scheduled == false);
				CU_ASSERT((pbdev->superblock_enabled && base_info->data_offset != 0) ||
					  (!pbdev->superblock_enabled && base_info->data_offset == 0));
				CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt);

				if (bdev && base_info->data_size < min_blockcnt) {
					min_blockcnt = base_info->data_size;
				}
			}
			CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) *
				    (r->strip_size_kb * 1024 / g_block_len)) *
				   r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
			CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0);
			CU_ASSERT(pbdev->bdev.write_cache == 0);
			CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
			if (pbdev->num_base_bdevs > 1) {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
			} else {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
			}
			CU_ASSERT(pbdev->bdev.ctxt == pbdev);
			CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
			CU_ASSERT(pbdev->bdev.module == &g_raid_if);
			break;
		}
	}
	CU_ASSERT(pbdev_found == presence);
}

static void
verify_get_raids(struct rpc_bdev_raid_create *construct_req,
		 uint8_t g_max_raids,
		 char **g_get_raids_output, uint32_t g_get_raids_count)
{
	uint8_t i, j;
	bool found;

	CU_ASSERT(g_max_raids == g_get_raids_count);
	if (g_max_raids == g_get_raids_count) {
		for (i = 0; i < g_max_raids; i++) {
			found = false;
			for (j = 0; j < g_max_raids; j++) {
				if (construct_req[i].name &&
				    strcmp(construct_req[i].name, g_get_raids_output[j]) == 0) {
					found = true;
					break;
				}
			}
			CU_ASSERT(found == true);
		}
	}
}

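/* Register g_max_base_drives fake base bdevs named "Nvme<idx>n1"; when
 * g_enable_dif is set they advertise DIF type 1 with separate metadata.
 */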
static void
create_base_bdevs(uint32_t bbdev_start_idx)
{
	uint8_t i;
	struct spdk_bdev *base_bdev;
	char name[16];

	for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
		base_bdev = calloc(1, sizeof(struct spdk_bdev));
		SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
		base_bdev->name = strdup(name);
		SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
		spdk_uuid_generate(&base_bdev->uuid);
		base_bdev->blocklen = g_block_len;
		base_bdev->blockcnt = BLOCK_CNT;
		if (g_enable_dif) {
			base_bdev->md_interleave = false;
			base_bdev->md_len = MD_SIZE;
			base_bdev->dif_check_flags =
				SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK |
				SPDK_DIF_FLAGS_APPTAG_CHECK;
			base_bdev->dif_type = SPDK_DIF_TYPE1;
		}
		TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
	}
}

static void
create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name,
		uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled)
{
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = bbdev_start_idx;

	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);
	r->strip_size_kb = (g_strip_size * g_block_len) / 1024;
	r->level = RAID0;
	r->superblock_enabled = superblock_enabled;
	r->base_bdevs.num_base_bdevs = g_max_base_drives;
	for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
		r->base_bdevs.base_bdevs[i] = strdup(name);
		SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
	}
	if (create_base_bdev == true) {
		create_base_bdevs(bbdev_start_idx);
	}
	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
}

static void
create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name,
			    uint8_t bbdev_start_idx, bool create_base_bdev,
			    uint8_t json_decode_obj_err, bool superblock_enabled)
{
	create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock_enabled);

	g_rpc_err = 0;
	g_json_decode_obj_create = 1;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 0;
}

static void
free_test_req(struct rpc_bdev_raid_create *r)
{
	uint8_t i;

	free(r->name);
	for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
		free(r->base_bdevs.base_bdevs[i]);
	}
}

static void
create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name,
			    uint8_t json_decode_obj_err)
{
	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 0;
}

static void
create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category,
		     uint8_t json_decode_obj_err)
{
	r->category = strdup(category);
	SPDK_CU_ASSERT_FATAL(r->category != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_config_level_create = 0;
	g_test_multi_raids = 1;
	g_get_raids_count = 0;
}

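/*
 * The test cases below share a common shape: set_globals() primes the stubs,
 * raid_bdev_init() starts the module, requests prepared by the create_*_req()
 * helpers drive the RPC handlers, and the verify_*() helpers check the
 * resulting raid bdev state before cleanup.
 */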
static void
test_create_raid(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_create_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *raid_bdev;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	req.level = INVALID_RAID_LEVEL;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	req.strip_size_kb = 1231;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	raid_bdev = raid_bdev_find_by_name("raid2");
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	check_and_remove_raid_bdev(raid_bdev);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete destroy_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 1);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(destroy_req.name);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

1540 static void
1541 test_io_channel(void)
1542 {
1543 	struct rpc_bdev_raid_create req;
1544 	struct rpc_bdev_raid_delete destroy_req;
1545 	struct raid_bdev *pbdev;
1546 	struct spdk_io_channel *ch;
1547 	struct raid_bdev_io_channel *ch_ctx;
1548 
1549 	set_globals();
1550 	CU_ASSERT(raid_bdev_init() == 0);
1551 
1552 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1553 	verify_raid_bdev_present("raid1", false);
1554 	rpc_bdev_raid_create(NULL, NULL);
1555 	CU_ASSERT(g_rpc_err == 0);
1556 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1557 
1558 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1559 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1560 			break;
1561 		}
1562 	}
1563 	CU_ASSERT(pbdev != NULL);
1564 
1565 	ch = spdk_get_io_channel(pbdev);
1566 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1567 
1568 	ch_ctx = spdk_io_channel_get_ctx(ch);
1569 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1570 
1571 	free_test_req(&req);
1572 
1573 	spdk_put_io_channel(ch);
1574 
1575 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1576 	rpc_bdev_raid_delete(NULL, NULL);
1577 	CU_ASSERT(g_rpc_err == 0);
1578 	verify_raid_bdev_present("raid1", false);
1579 
1580 	raid_bdev_exit();
1581 	base_bdevs_cleanup();
1582 	reset_globals();
1583 }
1584 
1585 static void
1586 test_write_io(void)
1587 {
1588 	struct rpc_bdev_raid_create req;
1589 	struct rpc_bdev_raid_delete destroy_req;
1590 	struct raid_bdev *pbdev;
1591 	struct spdk_io_channel *ch;
1592 	struct raid_bdev_io_channel *ch_ctx;
1593 	uint8_t i;
1594 	struct spdk_bdev_io *bdev_io;
1595 	uint64_t io_len;
1596 	uint64_t lba = 0;
1597 
1598 	set_globals();
1599 	CU_ASSERT(raid_bdev_init() == 0);
1600 
1601 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1602 	verify_raid_bdev_present("raid1", false);
1603 	rpc_bdev_raid_create(NULL, NULL);
1604 	CU_ASSERT(g_rpc_err == 0);
1605 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1606 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1607 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1608 			break;
1609 		}
1610 	}
1611 	CU_ASSERT(pbdev != NULL);
1612 
1613 	ch = spdk_get_io_channel(pbdev);
1614 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1615 
1616 	ch_ctx = spdk_io_channel_get_ctx(ch);
1617 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1618 
1619 	/* test 2 IO sizes based on global strip size set earlier */
1620 	for (i = 0; i < 2; i++) {
1621 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1622 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1623 		io_len = (g_strip_size / 2) << i;
1624 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
1625 		lba += g_strip_size;
1626 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1627 		g_io_output_index = 0;
1628 		generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
1629 			     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
1630 		raid_bdev_submit_request(ch, bdev_io);
1631 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1632 			  g_child_io_status_flag);
1633 		bdev_io_cleanup(bdev_io);
1634 	}
1635 
1636 	free_test_req(&req);
1637 	spdk_put_io_channel(ch);
1638 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1639 	rpc_bdev_raid_delete(NULL, NULL);
1640 	CU_ASSERT(g_rpc_err == 0);
1641 	verify_raid_bdev_present("raid1", false);
1642 
1643 	raid_bdev_exit();
1644 	base_bdevs_cleanup();
1645 	reset_globals();
1646 }
1647 
1648 static void
1649 test_read_io(void)
1650 {
1651 	struct rpc_bdev_raid_create req;
1652 	struct rpc_bdev_raid_delete destroy_req;
1653 	struct raid_bdev *pbdev;
1654 	struct spdk_io_channel *ch;
1655 	struct raid_bdev_io_channel *ch_ctx;
1656 	uint8_t i;
1657 	struct spdk_bdev_io *bdev_io;
1658 	uint64_t io_len;
1659 	uint64_t lba;
1660 
1661 	set_globals();
1662 	CU_ASSERT(raid_bdev_init() == 0);
1663 
1664 	verify_raid_bdev_present("raid1", false);
1665 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1666 	rpc_bdev_raid_create(NULL, NULL);
1667 	CU_ASSERT(g_rpc_err == 0);
1668 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1669 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1670 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1671 			break;
1672 		}
1673 	}
1674 	CU_ASSERT(pbdev != NULL);
1675 
1676 	ch = spdk_get_io_channel(pbdev);
1677 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1678 
1679 	ch_ctx = spdk_io_channel_get_ctx(ch);
1680 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1681 
1682 	/* test 2 IO sizes based on global strip size set earlier */
1683 	lba = 0;
1684 	for (i = 0; i < 2; i++) {
1685 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1686 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1687 		io_len = (g_strip_size / 2) << i;
1688 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
1689 		lba += g_strip_size;
1690 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1691 		g_io_output_index = 0;
1692 		raid_bdev_submit_request(ch, bdev_io);
1693 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1694 			  g_child_io_status_flag);
1695 		bdev_io_cleanup(bdev_io);
1696 	}
1697 
1698 	free_test_req(&req);
1699 	spdk_put_io_channel(ch);
1700 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1701 	rpc_bdev_raid_delete(NULL, NULL);
1702 	CU_ASSERT(g_rpc_err == 0);
1703 	verify_raid_bdev_present("raid1", false);
1704 
1705 	raid_bdev_exit();
1706 	base_bdevs_cleanup();
1707 	reset_globals();
1708 }
1709 
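/* Generate test IO ranges spanning n_strips strips, varying the start/end offsets
 * within a strip and the base bdev on which the range starts.
 */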
1710 static void
1711 raid_bdev_io_generate_by_strips(uint64_t n_strips)
1712 {
1713 	uint64_t lba;
1714 	uint64_t nblocks;
1715 	uint64_t start_offset;
1716 	uint64_t end_offset;
1717 	uint64_t offsets_in_strip[3];
1718 	uint64_t start_bdev_idx;
1719 	uint64_t start_bdev_offset;
1720 	uint64_t start_bdev_idxs[3];
1721 	int i, j, l;
1722 
1723 	/* 3 different cases of offset within a strip */
1724 	offsets_in_strip[0] = 0;
1725 	offsets_in_strip[1] = g_strip_size >> 1;
1726 	offsets_in_strip[2] = g_strip_size - 1;
1727 
1728 	/* 3 different cases of start_bdev_idx */
1729 	start_bdev_idxs[0] = 0;
1730 	start_bdev_idxs[1] = g_max_base_drives >> 1;
1731 	start_bdev_idxs[2] = g_max_base_drives - 1;
1732 
1733 	/* consider different start/end offsets within a strip */
1734 	for (i = 0; i < 3; i++) {
1735 		start_offset = offsets_in_strip[i];
1736 		for (j = 0; j < 3; j++) {
1737 			end_offset = offsets_in_strip[j];
1738 			if (n_strips == 1 && start_offset > end_offset) {
1739 				continue;
1740 			}
1741 
1742 			/* consider on which base bdev the LBA starts */
1743 			for (l = 0; l < 3; l++) {
1744 				start_bdev_idx = start_bdev_idxs[l];
1745 				start_bdev_offset = start_bdev_idx * g_strip_size;
1746 				lba = g_lba_offset + start_bdev_offset + start_offset;
1747 				nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;
1748 
1749 				SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
1750 				g_io_ranges[g_io_range_idx].lba = lba;
1751 				g_io_ranges[g_io_range_idx].nblocks = nblocks;
1752 
1753 				g_io_range_idx++;
1754 			}
1755 		}
1756 	}
1757 }
1758 
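/* Populate g_io_ranges with ranges covering from a single strip up to several
 * multiples of a full stripe across all base bdevs.
 */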
1759 static void
1760 raid_bdev_io_generate(void)
1761 {
1762 	uint64_t n_strips;
1763 	uint64_t n_strips_span = g_max_base_drives;
1764 	uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1,
1765 				      g_max_base_drives * 2, g_max_base_drives * 3,
1766 				      g_max_base_drives * 4
1767 				     };
1768 	uint32_t i;
1769 
1770 	g_io_range_idx = 0;
1771 
1772 	/* consider different numbers of strips: from 1 up to one strip per base bdev,
1773 	 * and also several multiples of that span
1774 	 */
1775 	for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
1776 		raid_bdev_io_generate_by_strips(n_strips);
1777 	}
1778 
1779 	for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
1780 		n_strips = n_strips_times[i];
1781 		raid_bdev_io_generate_by_strips(n_strips);
1782 	}
1783 }
1784 
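/* Submit unmap IOs over the generated ranges and verify how they are split across
 * the base bdevs without a data payload.
 */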
1785 static void
1786 test_unmap_io(void)
1787 {
1788 	struct rpc_bdev_raid_create req;
1789 	struct rpc_bdev_raid_delete destroy_req;
1790 	struct raid_bdev *pbdev;
1791 	struct spdk_io_channel *ch;
1792 	struct raid_bdev_io_channel *ch_ctx;
1793 	struct spdk_bdev_io *bdev_io;
1794 	uint32_t count;
1795 	uint64_t io_len;
1796 	uint64_t lba;
1797 
1798 	set_globals();
1799 	CU_ASSERT(raid_bdev_init() == 0);
1800 
1801 	verify_raid_bdev_present("raid1", false);
1802 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1803 	rpc_bdev_raid_create(NULL, NULL);
1804 	CU_ASSERT(g_rpc_err == 0);
1805 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1806 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1807 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1808 			break;
1809 		}
1810 	}
1811 	CU_ASSERT(pbdev != NULL);
1812 
1813 	ch = spdk_get_io_channel(pbdev);
1814 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1815 
1816 	ch_ctx = spdk_io_channel_get_ctx(ch);
1817 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1818 
1819 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
1820 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true);
1821 
1822 	raid_bdev_io_generate();
1823 	for (count = 0; count < g_io_range_idx; count++) {
1824 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1825 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1826 		io_len = g_io_ranges[count].nblocks;
1827 		lba = g_io_ranges[count].lba;
1828 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
1829 		memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
1830 		g_io_output_index = 0;
1831 		raid_bdev_submit_request(ch, bdev_io);
1832 		verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1833 					  g_child_io_status_flag);
1834 		bdev_io_cleanup(bdev_io);
1835 	}
1836 
1837 	free_test_req(&req);
1838 	spdk_put_io_channel(ch);
1839 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1840 	rpc_bdev_raid_delete(NULL, NULL);
1841 	CU_ASSERT(g_rpc_err == 0);
1842 	verify_raid_bdev_present("raid1", false);
1843 
1844 	raid_bdev_exit();
1845 	base_bdevs_cleanup();
1846 	reset_globals();
1847 }
1848 
1849 /* Test IO failures */
1850 static void
1851 test_io_failure(void)
1852 {
1853 	struct rpc_bdev_raid_create req;
1854 	struct rpc_bdev_raid_delete destroy_req;
1855 	struct raid_bdev *pbdev;
1856 	struct spdk_io_channel *ch;
1857 	struct raid_bdev_io_channel *ch_ctx;
1858 	struct spdk_bdev_io *bdev_io;
1859 	uint32_t count;
1860 	uint64_t io_len;
1861 	uint64_t lba;
1862 
1863 	set_globals();
1864 	CU_ASSERT(raid_bdev_init() == 0);
1865 
1866 	verify_raid_bdev_present("raid1", false);
1867 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1868 	rpc_bdev_raid_create(NULL, NULL);
1869 	CU_ASSERT(g_rpc_err == 0);
1870 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1871 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1872 		if (strcmp(pbdev->bdev.name, req.name) == 0) {
1873 			break;
1874 		}
1875 	}
1876 	CU_ASSERT(pbdev != NULL);
1877 
1878 	ch = spdk_get_io_channel(pbdev);
1879 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1880 
1881 	ch_ctx = spdk_io_channel_get_ctx(ch);
1882 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1883 
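	/* Submit an IO with an invalid type and verify that submission fails */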
1884 	lba = 0;
1885 	for (count = 0; count < 1; count++) {
1886 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1887 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1888 		io_len = (g_strip_size / 2) << count;
1889 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
1890 		lba += g_strip_size;
1891 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1892 		g_io_output_index = 0;
1893 		raid_bdev_submit_request(ch, bdev_io);
1894 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1895 			  INVALID_IO_SUBMIT);
1896 		bdev_io_cleanup(bdev_io);
1897 	}
1898 
1899 
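	/* Submit a valid write while child IOs fail and verify that the failure is
	 * propagated to the parent IO.
	 */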
1900 	lba = 0;
1901 	g_child_io_status_flag = false;
1902 	for (count = 0; count < 1; count++) {
1903 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1904 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1905 		io_len = (g_strip_size / 2) << count;
1906 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
1907 		lba += g_strip_size;
1908 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1909 		g_io_output_index = 0;
1910 		generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
1911 			     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
1912 		raid_bdev_submit_request(ch, bdev_io);
1913 		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1914 			  g_child_io_status_flag);
1915 		bdev_io_cleanup(bdev_io);
1916 	}
1917 
1918 	free_test_req(&req);
1919 	spdk_put_io_channel(ch);
1920 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1921 	rpc_bdev_raid_delete(NULL, NULL);
1922 	CU_ASSERT(g_rpc_err == 0);
1923 	verify_raid_bdev_present("raid1", false);
1924 
1925 	raid_bdev_exit();
1926 	base_bdevs_cleanup();
1927 	reset_globals();
1928 }
1929 
1930 /* Test reset IO */
1931 static void
1932 test_reset_io(void)
1933 {
1934 	struct rpc_bdev_raid_create req;
1935 	struct rpc_bdev_raid_delete destroy_req;
1936 	struct raid_bdev *pbdev;
1937 	struct spdk_io_channel *ch;
1938 	struct raid_bdev_io_channel *ch_ctx;
1939 	struct spdk_bdev_io *bdev_io;
1940 
1941 	set_globals();
1942 	CU_ASSERT(raid_bdev_init() == 0);
1943 
1944 	verify_raid_bdev_present("raid1", false);
1945 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1946 	rpc_bdev_raid_create(NULL, NULL);
1947 	CU_ASSERT(g_rpc_err == 0);
1948 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1949 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1950 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1951 			break;
1952 		}
1953 	}
1954 	CU_ASSERT(pbdev != NULL);
1955 
1956 	ch = spdk_get_io_channel(pbdev);
1957 	SPDK_CU_ASSERT_FATAL(ch != NULL);
1958 
1959 	ch_ctx = spdk_io_channel_get_ctx(ch);
1960 	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1961 
1962 	g_bdev_io_submit_status = 0;
1963 	g_child_io_status_flag = true;
1964 
1965 	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);
1966 
1967 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1968 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1969 	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
1970 	memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
1971 	g_io_output_index = 0;
1972 	raid_bdev_submit_request(ch, bdev_io);
1973 	verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
1974 			true);
1975 	bdev_io_cleanup(bdev_io);
1976 
1977 	free_test_req(&req);
1978 	spdk_put_io_channel(ch);
1979 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1980 	rpc_bdev_raid_delete(NULL, NULL);
1981 	CU_ASSERT(g_rpc_err == 0);
1982 	verify_raid_bdev_present("raid1", false);
1983 
1984 	raid_bdev_exit();
1985 	base_bdevs_cleanup();
1986 	reset_globals();
1987 }
1988 
1989 /* Create multiple raids, destroy raids without IO, get_raids related tests */
1990 static void
1991 test_multi_raid_no_io(void)
1992 {
1993 	struct rpc_bdev_raid_create *construct_req;
1994 	struct rpc_bdev_raid_delete destroy_req;
1995 	struct rpc_bdev_raid_get_bdevs get_raids_req;
1996 	uint8_t i;
1997 	char name[16];
1998 	uint8_t bbdev_idx = 0;
1999 
2000 	set_globals();
2001 	construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
2002 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
2003 	CU_ASSERT(raid_bdev_init() == 0);
2004 	for (i = 0; i < g_max_raids; i++) {
2005 		snprintf(name, 16, "%s%u", "raid", i);
2006 		verify_raid_bdev_present(name, false);
2007 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
2008 		bbdev_idx += g_max_base_drives;
2009 		rpc_bdev_raid_create(NULL, NULL);
2010 		CU_ASSERT(g_rpc_err == 0);
2011 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
2012 	}
2013 
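	/* Query raid bdevs by category; invalid categories and malformed requests must fail */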
2014 	create_get_raids_req(&get_raids_req, "all", 0);
2015 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2016 	CU_ASSERT(g_rpc_err == 0);
2017 	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
2018 	for (i = 0; i < g_get_raids_count; i++) {
2019 		free(g_get_raids_output[i]);
2020 	}
2021 
2022 	create_get_raids_req(&get_raids_req, "online", 0);
2023 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2024 	CU_ASSERT(g_rpc_err == 0);
2025 	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
2026 	for (i = 0; i < g_get_raids_count; i++) {
2027 		free(g_get_raids_output[i]);
2028 	}
2029 
2030 	create_get_raids_req(&get_raids_req, "configuring", 0);
2031 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2032 	CU_ASSERT(g_rpc_err == 0);
2033 	CU_ASSERT(g_get_raids_count == 0);
2034 
2035 	create_get_raids_req(&get_raids_req, "offline", 0);
2036 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2037 	CU_ASSERT(g_rpc_err == 0);
2038 	CU_ASSERT(g_get_raids_count == 0);
2039 
2040 	create_get_raids_req(&get_raids_req, "invalid_category", 0);
2041 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2042 	CU_ASSERT(g_rpc_err == 1);
2043 	CU_ASSERT(g_get_raids_count == 0);
2044 
2045 	create_get_raids_req(&get_raids_req, "all", 1);
2046 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2047 	CU_ASSERT(g_rpc_err == 1);
2048 	free(get_raids_req.category);
2049 	CU_ASSERT(g_get_raids_count == 0);
2050 
2051 	create_get_raids_req(&get_raids_req, "all", 0);
2052 	rpc_bdev_raid_get_bdevs(NULL, NULL);
2053 	CU_ASSERT(g_rpc_err == 0);
2054 	CU_ASSERT(g_get_raids_count == g_max_raids);
2055 	for (i = 0; i < g_get_raids_count; i++) {
2056 		free(g_get_raids_output[i]);
2057 	}
2058 
2059 	for (i = 0; i < g_max_raids; i++) {
2060 		SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
2061 		snprintf(name, 16, "%s", construct_req[i].name);
2062 		create_raid_bdev_delete_req(&destroy_req, name, 0);
2063 		rpc_bdev_raid_delete(NULL, NULL);
2064 		CU_ASSERT(g_rpc_err == 0);
2065 		verify_raid_bdev_present(name, false);
2066 	}
2067 	raid_bdev_exit();
2068 	for (i = 0; i < g_max_raids; i++) {
2069 		free_test_req(&construct_req[i]);
2070 	}
2071 	free(construct_req);
2072 	base_bdevs_cleanup();
2073 	reset_globals();
2074 }
2075 
2076 /* Create multiple raids, fire IOs on raids */
2077 static void
2078 test_multi_raid_with_io(void)
2079 {
2080 	struct rpc_bdev_raid_create *construct_req;
2081 	struct rpc_bdev_raid_delete destroy_req;
2082 	uint8_t i;
2083 	char name[16];
2084 	uint8_t bbdev_idx = 0;
2085 	struct raid_bdev *pbdev;
2086 	struct spdk_io_channel **channels;
2087 	struct spdk_bdev_io *bdev_io;
2088 	uint64_t io_len;
2089 	uint64_t lba = 0;
2090 	int16_t iotype;
2091 
2092 	set_globals();
2093 	construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create));
2094 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
2095 	CU_ASSERT(raid_bdev_init() == 0);
2096 	channels = calloc(g_max_raids, sizeof(*channels));
2097 	SPDK_CU_ASSERT_FATAL(channels != NULL);
2098 
2099 	for (i = 0; i < g_max_raids; i++) {
2100 		snprintf(name, 16, "%s%u", "raid", i);
2101 		verify_raid_bdev_present(name, false);
2102 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
2103 		bbdev_idx += g_max_base_drives;
2104 		rpc_bdev_raid_create(NULL, NULL);
2105 		CU_ASSERT(g_rpc_err == 0);
2106 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
2107 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2108 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
2109 				break;
2110 			}
2111 		}
2112 		CU_ASSERT(pbdev != NULL);
2113 
2114 		channels[i] = spdk_get_io_channel(pbdev);
2115 		SPDK_CU_ASSERT_FATAL(channels[i] != NULL);
2116 	}
2117 
2118 	/* This will perform a read on the first raid and a write on the second. It can be
2119 	 * expanded in the future to perform r/w on each raid device in the event that
2120 	 * multiple raid levels are supported.
2121 	 */
2122 	for (i = 0; i < g_max_raids; i++) {
2123 		struct spdk_io_channel *ch = channels[i];
2124 		struct raid_bdev_io_channel *ch_ctx = spdk_io_channel_get_ctx(ch);
2125 
2126 		SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
2127 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2128 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2129 		io_len = g_strip_size;
2130 		iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
2131 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2132 		g_io_output_index = 0;
2133 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2134 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
2135 				break;
2136 			}
2137 		}
2138 		CU_ASSERT(pbdev != NULL);
2139 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, iotype);
2140 		if (iotype == SPDK_BDEV_IO_TYPE_WRITE) {
2141 			generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
2142 				     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
2143 		}
2144 		raid_bdev_submit_request(ch, bdev_io);
2145 		verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
2146 			  g_child_io_status_flag);
2147 		bdev_io_cleanup(bdev_io);
2148 	}
2149 
2150 	for (i = 0; i < g_max_raids; i++) {
2151 		spdk_put_io_channel(channels[i]);
2152 		snprintf(name, 16, "%s", construct_req[i].name);
2153 		create_raid_bdev_delete_req(&destroy_req, name, 0);
2154 		rpc_bdev_raid_delete(NULL, NULL);
2155 		CU_ASSERT(g_rpc_err == 0);
2156 		verify_raid_bdev_present(name, false);
2157 	}
2158 	raid_bdev_exit();
2159 	for (i = 0; i < g_max_raids; i++) {
2160 		free_test_req(&construct_req[i]);
2161 	}
2162 	free(construct_req);
2163 	free(channels);
2164 	base_bdevs_cleanup();
2165 	reset_globals();
2166 }
2167 
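/* Verify which IO types the raid module reports as supported */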
2168 static void
2169 test_io_type_supported(void)
2170 {
2171 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
2172 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
2173 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
2174 }
2175 
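/* Verify that dumping info of an online raid bdev to JSON succeeds */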
2176 static void
2177 test_raid_json_dump_info(void)
2178 {
2179 	struct rpc_bdev_raid_create req;
2180 	struct rpc_bdev_raid_delete destroy_req;
2181 	struct raid_bdev *pbdev;
2182 
2183 	set_globals();
2184 	CU_ASSERT(raid_bdev_init() == 0);
2185 
2186 	verify_raid_bdev_present("raid1", false);
2187 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2188 	rpc_bdev_raid_create(NULL, NULL);
2189 	CU_ASSERT(g_rpc_err == 0);
2190 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2191 
2192 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2193 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2194 			break;
2195 		}
2196 	}
2197 	CU_ASSERT(pbdev != NULL);
2198 
2199 	CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
2200 
2201 	free_test_req(&req);
2202 
2203 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2204 	rpc_bdev_raid_delete(NULL, NULL);
2205 	CU_ASSERT(g_rpc_err == 0);
2206 	verify_raid_bdev_present("raid1", false);
2207 
2208 	raid_bdev_exit();
2209 	base_bdevs_cleanup();
2210 	reset_globals();
2211 }
2212 
2213 static void
2214 test_context_size(void)
2215 {
2216 	CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
2217 }
2218 
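/* Verify the string <-> raid level conversion helpers for valid and invalid input */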
2219 static void
2220 test_raid_level_conversions(void)
2221 {
2222 	const char *raid_str;
2223 
2224 	CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL);
2225 	CU_ASSERT(raid_bdev_str_to_level("0") == RAID0);
2226 	CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0);
2227 	CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0);
2228 
2229 	raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
2230 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
2231 	raid_str = raid_bdev_level_to_str(1234);
2232 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
2233 	raid_str = raid_bdev_level_to_str(RAID0);
2234 	CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
2235 }
2236 
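/* Create and delete a raid bdev with the superblock option enabled */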
2237 static void
2238 test_create_raid_superblock(void)
2239 {
2240 	struct rpc_bdev_raid_create req;
2241 	struct rpc_bdev_raid_delete delete_req;
2242 
2243 	set_globals();
2244 	CU_ASSERT(raid_bdev_init() == 0);
2245 
2246 	verify_raid_bdev_present("raid1", false);
2247 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true);
2248 	rpc_bdev_raid_create(NULL, NULL);
2249 	CU_ASSERT(g_rpc_err == 0);
2250 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2251 	free_test_req(&req);
2252 
2253 	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
2254 	rpc_bdev_raid_delete(NULL, NULL);
2255 	CU_ASSERT(g_rpc_err == 0);
2256 	raid_bdev_exit();
2257 	base_bdevs_cleanup();
2258 	reset_globals();
2259 }
2260 
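/* Stub process-request handlers used by test_raid_process: the submit callback counts
 * the processed blocks and completes the request asynchronously via a thread message.
 */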
2261 static void
2262 complete_process_request(void *ctx)
2263 {
2264 	struct raid_bdev_process_request *process_req = ctx;
2265 
2266 	raid_bdev_process_request_complete(process_req, 0);
2267 }
2268 
2269 static int
2270 submit_process_request(struct raid_bdev_process_request *process_req,
2271 		       struct raid_bdev_io_channel *raid_ch)
2272 {
2273 	struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(raid_ch));
2274 
2275 	*(uint64_t *)raid_bdev->module_private += process_req->num_blocks;
2276 
2277 	spdk_thread_send_msg(spdk_get_thread(), complete_process_request, process_req);
2278 
2279 	return process_req->num_blocks;
2280 }
2281 
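/* Start a rebuild of the first base bdev and drive the process thread until it
 * finishes, then verify that the whole raid bdev was processed.
 */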
2282 static void
2283 test_raid_process(void)
2284 {
2285 	struct rpc_bdev_raid_create req;
2286 	struct rpc_bdev_raid_delete destroy_req;
2287 	struct raid_bdev *pbdev;
2288 	struct spdk_bdev *base_bdev;
2289 	struct spdk_thread *process_thread;
2290 	uint64_t num_blocks_processed = 0;
2291 
2292 	set_globals();
2293 	CU_ASSERT(raid_bdev_init() == 0);
2294 
2295 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2296 	verify_raid_bdev_present("raid1", false);
2297 	TAILQ_FOREACH(base_bdev, &g_bdev_list, internal.link) {
2298 		base_bdev->blockcnt = 128;
2299 	}
2300 	rpc_bdev_raid_create(NULL, NULL);
2301 	CU_ASSERT(g_rpc_err == 0);
2302 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2303 	free_test_req(&req);
2304 
2305 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2306 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2307 			break;
2308 		}
2309 	}
2310 	CU_ASSERT(pbdev != NULL);
2311 
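	/* Hook the stub process-request handler and count processed blocks in module_private */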
2312 	pbdev->module->submit_process_request = submit_process_request;
2313 	pbdev->module_private = &num_blocks_processed;
2314 
2315 	CU_ASSERT(raid_bdev_start_rebuild(&pbdev->base_bdev_info[0]) == 0);
2316 	poll_threads();
2317 
2318 	SPDK_CU_ASSERT_FATAL(pbdev->process != NULL);
2319 
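	/* The process thread is assumed to be the one created right after the current
	 * thread (see the TODO about tests_single_run in main()).
	 */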
2320 	process_thread = spdk_thread_get_by_id(spdk_thread_get_id(spdk_get_thread()) + 1);
2321 
2322 	while (spdk_thread_poll(process_thread, 0, 0) > 0) {
2323 		poll_threads();
2324 	}
2325 
2326 	CU_ASSERT(pbdev->process == NULL);
2327 	CU_ASSERT(num_blocks_processed == pbdev->bdev.blockcnt);
2328 
2329 	poll_threads();
2330 
2331 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2332 	rpc_bdev_raid_delete(NULL, NULL);
2333 	CU_ASSERT(g_rpc_err == 0);
2334 	verify_raid_bdev_present("raid1", false);
2335 
2336 	raid_bdev_exit();
2337 	base_bdevs_cleanup();
2338 	reset_globals();
2339 }
2340 
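/* Simulate an active rebuild process and verify that write IOs spanning the process
 * offset are split in two, with iovecs and metadata buffer adjusted for each part.
 */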
2341 static void
2342 test_raid_io_split(void)
2343 {
2344 	struct rpc_bdev_raid_create req;
2345 	struct rpc_bdev_raid_delete destroy_req;
2346 	struct raid_bdev *pbdev;
2347 	struct spdk_io_channel *ch;
2348 	struct raid_bdev_io_channel *raid_ch;
2349 	struct spdk_bdev_io *bdev_io;
2350 	struct raid_bdev_io *raid_io;
2351 	uint64_t split_offset;
2352 	struct iovec iovs_orig[4];
2353 	struct raid_bdev_process process = { };
2354 
2355 	set_globals();
2356 	CU_ASSERT(raid_bdev_init() == 0);
2357 
2358 	verify_raid_bdev_present("raid1", false);
2359 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
2360 	rpc_bdev_raid_create(NULL, NULL);
2361 	CU_ASSERT(g_rpc_err == 0);
2362 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
2363 
2364 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
2365 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
2366 			break;
2367 		}
2368 	}
2369 	CU_ASSERT(pbdev != NULL);
2370 	pbdev->bdev.md_len = 8;
2371 
2372 	process.raid_bdev = pbdev;
2373 	process.target = &pbdev->base_bdev_info[0];
2374 	pbdev->process = &process;
2375 	ch = spdk_get_io_channel(pbdev);
2376 	SPDK_CU_ASSERT_FATAL(ch != NULL);
2377 	raid_ch = spdk_io_channel_get_ctx(ch);
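	/* Defer child IO completions so that both halves of each split can be inspected
	 * before and after they complete.
	 */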
2378 	g_bdev_io_defer_completion = true;
2379 
2380 	/* test split of bdev_io with 1 iovec */
2381 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2382 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2383 	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
2384 	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE);
2385 	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
2386 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2387 	g_io_output_index = 0;
2388 
2389 	split_offset = 1;
2390 	raid_ch->process.offset = split_offset;
2391 	generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
2392 		     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
2393 	raid_bdev_submit_request(ch, bdev_io);
2394 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2395 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2396 	CU_ASSERT(raid_io->iovcnt == 1);
2397 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2398 	CU_ASSERT(raid_io->iovs == raid_io->split.iov);
2399 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base + split_offset * g_block_len);
2400 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len - split_offset * g_block_len);
2401 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2402 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2403 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2404 	}
2405 	complete_deferred_ios();
2406 	CU_ASSERT(raid_io->num_blocks == split_offset);
2407 	CU_ASSERT(raid_io->offset_blocks == 0);
2408 	CU_ASSERT(raid_io->iovcnt == 1);
2409 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
2410 	CU_ASSERT(raid_io->iovs[0].iov_len == split_offset * g_block_len);
2411 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2412 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2413 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2414 	}
2415 	complete_deferred_ios();
2416 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2417 	CU_ASSERT(raid_io->offset_blocks == 0);
2418 	CU_ASSERT(raid_io->iovcnt == 1);
2419 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
2420 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len);
2421 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2422 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2423 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2424 	}
2425 
2426 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2427 	CU_ASSERT(g_io_output_index == 2);
2428 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2429 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2430 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2431 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2432 	bdev_io_cleanup(bdev_io);
2433 
2434 	/* test split of bdev_io with 4 iovecs */
2435 	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
2436 	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
2437 	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
2438 	_bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE,
2439 			    4, g_strip_size / 4 * g_block_len);
2440 	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);
2441 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2442 	g_io_output_index = 0;
2443 
2444 	split_offset = 1; /* split at the first iovec */
2445 	raid_ch->process.offset = split_offset;
2446 	generate_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
2447 		     bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
2448 	raid_bdev_submit_request(ch, bdev_io);
2449 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2450 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2451 	CU_ASSERT(raid_io->iovcnt == 4);
2452 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[0]);
2453 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[0]);
2454 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base + g_block_len);
2455 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[0].iov_len - g_block_len);
2456 	CU_ASSERT(memcmp(raid_io->iovs + 1, iovs_orig + 1, sizeof(*iovs_orig) * 3) == 0);
2457 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2458 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2459 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2460 	}
2461 	complete_deferred_ios();
2462 	CU_ASSERT(raid_io->num_blocks == split_offset);
2463 	CU_ASSERT(raid_io->offset_blocks == 0);
2464 	CU_ASSERT(raid_io->iovcnt == 1);
2465 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2466 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base);
2467 	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
2468 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2469 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2470 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2471 	}
2472 	complete_deferred_ios();
2473 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2474 	CU_ASSERT(raid_io->offset_blocks == 0);
2475 	CU_ASSERT(raid_io->iovcnt == 4);
2476 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2477 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2478 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2479 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2480 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2481 	}
2482 
2483 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2484 	CU_ASSERT(g_io_output_index == 2);
2485 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2486 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2487 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2488 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2489 
2490 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2491 	g_io_output_index = 0;
2492 
2493 	split_offset = g_strip_size / 2; /* split exactly between second and third iovec */
2494 	raid_ch->process.offset = split_offset;
2495 	raid_bdev_submit_request(ch, bdev_io);
2496 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2497 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2498 	CU_ASSERT(raid_io->iovcnt == 2);
2499 	CU_ASSERT(raid_io->split.iov == NULL);
2500 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
2501 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig + 2, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2502 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2503 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2504 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2505 	}
2506 	complete_deferred_ios();
2507 	CU_ASSERT(raid_io->num_blocks == split_offset);
2508 	CU_ASSERT(raid_io->offset_blocks == 0);
2509 	CU_ASSERT(raid_io->iovcnt == 2);
2510 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2511 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2512 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2513 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2514 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2515 	}
2516 	complete_deferred_ios();
2517 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2518 	CU_ASSERT(raid_io->offset_blocks == 0);
2519 	CU_ASSERT(raid_io->iovcnt == 4);
2520 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2521 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2522 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2523 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2524 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2525 	}
2526 
2527 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2528 	CU_ASSERT(g_io_output_index == 2);
2529 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2530 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2531 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2532 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2533 
2534 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2535 	g_io_output_index = 0;
2536 
2537 	split_offset = g_strip_size / 2 + 1; /* split at the third iovec */
2538 	raid_ch->process.offset = split_offset;
2539 	raid_bdev_submit_request(ch, bdev_io);
2540 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2541 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2542 	CU_ASSERT(raid_io->iovcnt == 2);
2543 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[2]);
2544 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
2545 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[2].iov_base + g_block_len);
2546 	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[2].iov_len - g_block_len);
2547 	CU_ASSERT(raid_io->iovs[1].iov_base == iovs_orig[3].iov_base);
2548 	CU_ASSERT(raid_io->iovs[1].iov_len == iovs_orig[3].iov_len);
2549 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2550 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2551 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2552 	}
2553 	complete_deferred_ios();
2554 	CU_ASSERT(raid_io->num_blocks == split_offset);
2555 	CU_ASSERT(raid_io->offset_blocks == 0);
2556 	CU_ASSERT(raid_io->iovcnt == 3);
2557 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2558 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 2) == 0);
2559 	CU_ASSERT(raid_io->iovs[2].iov_base == iovs_orig[2].iov_base);
2560 	CU_ASSERT(raid_io->iovs[2].iov_len == g_block_len);
2561 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2562 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2563 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2564 	}
2565 	complete_deferred_ios();
2566 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2567 	CU_ASSERT(raid_io->offset_blocks == 0);
2568 	CU_ASSERT(raid_io->iovcnt == 4);
2569 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2570 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2571 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2572 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2573 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2574 	}
2575 
2576 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2577 	CU_ASSERT(g_io_output_index == 2);
2578 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2579 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2580 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2581 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2582 
2583 	memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
2584 	g_io_output_index = 0;
2585 
2586 	split_offset = g_strip_size - 1; /* split at the last iovec */
2587 	raid_ch->process.offset = split_offset;
2588 	raid_bdev_submit_request(ch, bdev_io);
2589 	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
2590 	CU_ASSERT(raid_io->offset_blocks == split_offset);
2591 	CU_ASSERT(raid_io->iovcnt == 1);
2592 	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[3]);
2593 	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[3]);
2594 	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[3].iov_base + iovs_orig[3].iov_len - g_block_len);
2595 	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
2596 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2597 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2598 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
2599 	}
2600 	complete_deferred_ios();
2601 	CU_ASSERT(raid_io->num_blocks == split_offset);
2602 	CU_ASSERT(raid_io->offset_blocks == 0);
2603 	CU_ASSERT(raid_io->iovcnt == 4);
2604 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2605 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 3) == 0);
2606 	CU_ASSERT(raid_io->iovs[3].iov_base == iovs_orig[3].iov_base);
2607 	CU_ASSERT(raid_io->iovs[3].iov_len == iovs_orig[3].iov_len - g_block_len);
2608 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2609 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2610 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2611 	}
2612 	complete_deferred_ios();
2613 	CU_ASSERT(raid_io->num_blocks == g_strip_size);
2614 	CU_ASSERT(raid_io->offset_blocks == 0);
2615 	CU_ASSERT(raid_io->iovcnt == 4);
2616 	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
2617 	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
2618 	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
2619 	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
2620 		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
2621 	}
2622 
2623 	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);
2624 	CU_ASSERT(g_io_output_index == 2);
2625 	CU_ASSERT(g_io_output[0].offset_blocks == split_offset);
2626 	CU_ASSERT(g_io_output[0].num_blocks == g_strip_size - split_offset);
2627 	CU_ASSERT(g_io_output[1].offset_blocks == 0);
2628 	CU_ASSERT(g_io_output[1].num_blocks == split_offset);
2629 	bdev_io_cleanup(bdev_io);
2630 
2631 	spdk_put_io_channel(ch);
2632 	free_test_req(&req);
2633 	pbdev->process = NULL;
2634 
2635 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
2636 	rpc_bdev_raid_delete(NULL, NULL);
2637 	CU_ASSERT(g_rpc_err == 0);
2638 	verify_raid_bdev_present("raid1", false);
2639 
2640 	raid_bdev_exit();
2641 	base_bdevs_cleanup();
2642 	reset_globals();
2643 }
2644 
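/* Dummy create/destroy callbacks for the io_device registered in main() */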
2645 static int
2646 test_bdev_ioch_create(void *io_device, void *ctx_buf)
2647 {
2648 	return 0;
2649 }
2650 
2651 static void
2652 test_bdev_ioch_destroy(void *io_device, void *ctx_buf)
2653 {
2654 }
2655 
2656 int
2657 main(int argc, char **argv)
2658 {
2659 	unsigned int    num_failures;
2660 
2661 	CU_TestInfo tests[] = {
2662 		{ "test_create_raid", test_create_raid },
2663 		{ "test_create_raid_superblock", test_create_raid_superblock },
2664 		{ "test_delete_raid", test_delete_raid },
2665 		{ "test_create_raid_invalid_args", test_create_raid_invalid_args },
2666 		{ "test_delete_raid_invalid_args", test_delete_raid_invalid_args },
2667 		{ "test_io_channel", test_io_channel },
2668 		{ "test_reset_io", test_reset_io },
2669 		{ "test_write_io", test_write_io },
2670 		{ "test_read_io", test_read_io },
2671 		{ "test_unmap_io", test_unmap_io },
2672 		{ "test_io_failure", test_io_failure },
2673 		{ "test_multi_raid_no_io", test_multi_raid_no_io },
2674 		{ "test_multi_raid_with_io", test_multi_raid_with_io },
2675 		{ "test_io_type_supported", test_io_type_supported },
2676 		{ "test_raid_json_dump_info", test_raid_json_dump_info },
2677 		{ "test_context_size", test_context_size },
2678 		{ "test_raid_level_conversions", test_raid_level_conversions },
2679 		{ "test_raid_io_split", test_raid_io_split },
2680 		CU_TEST_INFO_NULL,
2681 	};
2682 	/* TODO The RAID process test can only be run once for now, until the fix for getting the
2683 	 * process thread is merged */
2684 	CU_TestInfo tests_single_run[] = {
2685 		{ "test_raid_process", test_raid_process },
2686 		CU_TEST_INFO_NULL,
2687 	};
2688 	CU_SuiteInfo suites[] = {
2689 		{ "raid", set_test_opts, NULL, NULL, NULL, tests },
2690 		{ "raid_dif", set_test_opts_dif, NULL, NULL, NULL, tests },
2691 		{ "raid_single_run", set_test_opts, NULL, NULL, NULL, tests_single_run },
2692 		CU_SUITE_INFO_NULL,
2693 	};
2694 
2695 	CU_initialize_registry();
2696 	CU_register_suites(suites);
2697 
2698 	allocate_threads(1);
2699 	set_thread(0);
2700 	spdk_io_device_register(&g_bdev_ch_io_device, test_bdev_ioch_create, test_bdev_ioch_destroy, 0,
2701 				NULL);
2702 
2703 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2704 	CU_cleanup_registry();
2705 
2706 	spdk_io_device_unregister(&g_bdev_ch_io_device, NULL);
2707 	free_threads();
2708 
2709 	return num_failures;
2710 }
2711