xref: /spdk/test/unit/lib/bdev/raid/bdev_raid.c/bdev_raid_ut.c (revision ad5fc351dd221a287cce269ad0e50b11253cc48b)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 #include "spdk_internal/cunit.h"
9 #include "spdk/env.h"
10 #include "spdk_internal/mock.h"
11 #include "thread/thread_internal.h"
12 #include "bdev/raid/bdev_raid.c"
13 #include "bdev/raid/bdev_raid_rpc.c"
14 #include "bdev/raid/raid0.c"
15 #include "common/lib/ut_multithread.c"
16 
#define MAX_BASE_DRIVES 32
#define MAX_RAIDS 2
/* Sentinel io_status value used by verify helpers for IOs that were never submitted */
#define INVALID_IO_SUBMIT 0xFFFF
#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
/* Block count of each simulated base bdev (1024^4 = 2^40 blocks) */
#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)
22 
/* Minimal stand-in for the real bdev channel; only the backing IO channel is needed here. */
struct spdk_bdev_channel {
	struct spdk_io_channel *channel;
};
26 
/* Minimal stand-in for the real bdev descriptor; in these tests a desc is
 * effectively just a pointer to the bdev (see spdk_bdev_open_ext()).
 */
struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};
30 
/* Data structure to capture the output of IO for verification */
struct io_output {
	struct spdk_bdev_desc       *desc;		/* descriptor the IO was submitted on */
	struct spdk_io_channel      *ch;		/* channel the IO was submitted on */
	uint64_t                    offset_blocks;	/* base-bdev LBA of the split IO */
	uint64_t                    num_blocks;		/* length of the split IO in blocks */
	spdk_bdev_io_completion_cb  cb;			/* completion callback passed by caller */
	void                        *cb_arg;		/* callback context passed by caller */
	enum spdk_bdev_io_type      iotype;		/* READ/WRITE/UNMAP/RESET/... */
};
41 
/* One LBA range used when generating randomized test IOs */
struct raid_io_ranges {
	uint64_t lba;
	uint64_t nblocks;
};
46 
/* Globals */
int g_bdev_io_submit_status;			/* rc the stubbed submit functions return */
struct io_output *g_io_output = NULL;		/* captured split IOs (sized in set_globals()) */
uint32_t g_io_output_index;			/* number of captured split IOs */
uint32_t g_io_comp_status;			/* last raid IO completion status (bool-ish) */
bool g_child_io_status_flag;			/* status passed to child IO completions */
void *g_rpc_req;				/* prepared RPC request consumed by decode stub */
uint32_t g_rpc_req_size;			/* size of g_rpc_req for raw memcpy decode */
TAILQ_HEAD(bdev, spdk_bdev);
struct bdev g_bdev_list;			/* simulated registered base bdevs */
TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
struct waitq g_io_waitq;			/* IO wait queue (unused by stubs) */
uint32_t g_block_len;				/* block size of all test bdevs */
uint32_t g_strip_size;				/* raid strip size in blocks */
uint32_t g_max_io_size;				/* largest raid IO generated, in blocks */
uint8_t g_max_base_drives;			/* base bdevs per raid in current run */
uint8_t g_max_raids;				/* raid bdevs created in multi-raid tests */
uint8_t g_ignore_io_output;			/* when set, submit stubs do not capture IOs */
uint8_t g_rpc_err;				/* set by error-response stubs */
char *g_get_raids_output[MAX_RAIDS];		/* raid names reported by get_raids RPC */
uint32_t g_get_raids_count;
uint8_t g_json_decode_obj_err;			/* force spdk_json_decode_object() failure */
uint8_t g_json_decode_obj_create;		/* decode as a create request (deep copy) */
uint8_t g_config_level_create = 0;
uint8_t g_test_multi_raids;			/* steers json write stubs for multi-raid tests */
struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
uint32_t g_io_range_idx;
uint64_t g_lba_offset;
struct spdk_io_channel g_io_channel;		/* single channel shared by all base bdevs */
76 
/* Stubs for SPDK APIs the raid module links against but whose behavior is
 * either irrelevant to these tests or fixed to a trivial success value.
 */
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_conf_next_section, struct spdk_conf_section *, (struct spdk_conf_section *sp),
	    NULL);
DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
		uint32_t state_mask));
DEFINE_STUB_V(spdk_rpc_register_alias_deprecated, (const char *method, const char *alias));
DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
					struct spdk_json_write_ctx *w));
DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request,
		bool value));
DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values,
		spdk_json_decode_fn decode_func,
		void *out, size_t max_size, size_t *out_size, size_t stride), 0);
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
		uint64_t val), 0);
DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev,
		struct spdk_memory_domain **domains,	int array_size), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev");
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type, (const struct spdk_bdev *bdev),
	    SPDK_DIF_DISABLE);
DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0);
DEFINE_STUB_V(raid_bdev_init_superblock, (struct raid_bdev *raid_bdev));
129 
/* Stub: fail the superblock load immediately so base bdevs under test are
 * treated as carrying no on-disk raid superblock.
 */
int
raid_bdev_load_base_bdev_superblock(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				    raid_bdev_load_sb_cb cb, void *cb_ctx)
{
	/* Invoke the callback synchronously with -EINVAL (no valid superblock). */
	cb(NULL, -EINVAL, cb_ctx);

	return 0;
}
138 
/* Stub: complete the superblock write synchronously with success (status 0). */
void
raid_bdev_write_superblock(struct raid_bdev *raid_bdev, raid_bdev_write_sb_cb cb, void *cb_ctx)
{
	cb(0, raid_bdev, cb_ctx);
}
144 
/* Stub: return the uuid stored directly in the bdev structure. */
const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}
150 
/* Stub: all base bdevs share the single global IO channel; bind it to the
 * calling thread so thread-ownership checks in the code under test pass.
 */
struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
{
	g_io_channel.thread = spdk_get_thread();

	return &g_io_channel;
}
158 
159 static void
160 set_test_opts(void)
161 {
162 
163 	g_max_base_drives = MAX_BASE_DRIVES;
164 	g_max_raids = MAX_RAIDS;
165 	g_block_len = 4096;
166 	g_strip_size = 64;
167 	g_max_io_size = 1024;
168 
169 	printf("Test Options\n");
170 	printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
171 	       "g_max_raids = %u\n",
172 	       g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids);
173 }
174 
175 /* Set globals before every test run */
176 static void
177 set_globals(void)
178 {
179 	uint32_t max_splits;
180 
181 	g_bdev_io_submit_status = 0;
182 	if (g_max_io_size < g_strip_size) {
183 		max_splits = 2;
184 	} else {
185 		max_splits = (g_max_io_size / g_strip_size) + 1;
186 	}
187 	if (max_splits < g_max_base_drives) {
188 		max_splits = g_max_base_drives;
189 	}
190 
191 	g_io_output = calloc(max_splits, sizeof(struct io_output));
192 	SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
193 	g_io_output_index = 0;
194 	memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
195 	g_get_raids_count = 0;
196 	g_io_comp_status = 0;
197 	g_ignore_io_output = 0;
198 	g_config_level_create = 0;
199 	g_rpc_err = 0;
200 	g_test_multi_raids = 0;
201 	g_child_io_status_flag = true;
202 	TAILQ_INIT(&g_bdev_list);
203 	TAILQ_INIT(&g_io_waitq);
204 	g_rpc_req = NULL;
205 	g_rpc_req_size = 0;
206 	g_json_decode_obj_err = 0;
207 	g_json_decode_obj_create = 0;
208 	g_lba_offset = 0;
209 }
210 
211 static void
212 base_bdevs_cleanup(void)
213 {
214 	struct spdk_bdev *bdev;
215 	struct spdk_bdev *bdev_next;
216 
217 	if (!TAILQ_EMPTY(&g_bdev_list)) {
218 		TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
219 			free(bdev->name);
220 			TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
221 			free(bdev);
222 		}
223 	}
224 }
225 
/* Release every base bdev resource still held by the raid bdev, then free the
 * raid bdev itself.
 */
static void
check_and_remove_raid_bdev(struct raid_bdev *raid_bdev)
{
	struct raid_base_bdev_info *base_info;

	assert(raid_bdev != NULL);
	assert(raid_bdev->base_bdev_info != NULL);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		if (base_info->desc) {
			raid_bdev_free_base_bdev_resource(base_info);
		}
	}
	/* Freeing every base bdev resource must have dropped the discovered count to zero. */
	assert(raid_bdev->num_base_bdevs_discovered == 0);
	raid_bdev_cleanup_and_free(raid_bdev);
}
242 
243 /* Reset globals */
244 static void
245 reset_globals(void)
246 {
247 	if (g_io_output) {
248 		free(g_io_output);
249 		g_io_output = NULL;
250 	}
251 	g_rpc_req = NULL;
252 	g_rpc_req_size = 0;
253 }
254 
/* Stub: pretend the data buffer is already available and invoke the callback
 * immediately on the IO's channel.
 */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
		     uint64_t len)
{
	cb(bdev_io->internal.ch->channel, bdev_io, true);
}
261 
262 /* Store the IO completion status in global variable to verify by various tests */
263 void
264 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
265 {
266 	g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
267 }
268 
269 static void
270 set_io_output(struct io_output *output,
271 	      struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
272 	      uint64_t offset_blocks, uint64_t num_blocks,
273 	      spdk_bdev_io_completion_cb cb, void *cb_arg,
274 	      enum spdk_bdev_io_type iotype)
275 {
276 	output->desc = desc;
277 	output->ch = ch;
278 	output->offset_blocks = offset_blocks;
279 	output->num_blocks = num_blocks;
280 	output->cb = cb;
281 	output->cb_arg = cb_arg;
282 	output->iotype = iotype;
283 }
284 
285 /* It will cache the split IOs for verification */
286 int
287 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
288 			struct iovec *iov, int iovcnt,
289 			uint64_t offset_blocks, uint64_t num_blocks,
290 			spdk_bdev_io_completion_cb cb, void *cb_arg)
291 {
292 	struct io_output *output = &g_io_output[g_io_output_index];
293 	struct spdk_bdev_io *child_io;
294 
295 	if (g_ignore_io_output) {
296 		return 0;
297 	}
298 
299 	if (g_max_io_size < g_strip_size) {
300 		SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
301 	} else {
302 		SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
303 	}
304 	if (g_bdev_io_submit_status == 0) {
305 		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
306 			      SPDK_BDEV_IO_TYPE_WRITE);
307 		g_io_output_index++;
308 
309 		child_io = calloc(1, sizeof(struct spdk_bdev_io));
310 		SPDK_CU_ASSERT_FATAL(child_io != NULL);
311 		cb(child_io, g_child_io_status_flag, cb_arg);
312 	}
313 
314 	return g_bdev_io_submit_status;
315 }
316 
/* Stub: the extended options are ignored; forward to the plain write stub. */
int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt,
			    uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}
326 
/* Stub: metadata is ignored; forward to the plain write stub. */
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}
335 
336 int
337 spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
338 		spdk_bdev_io_completion_cb cb, void *cb_arg)
339 {
340 	struct io_output *output = &g_io_output[g_io_output_index];
341 	struct spdk_bdev_io *child_io;
342 
343 	if (g_ignore_io_output) {
344 		return 0;
345 	}
346 
347 	if (g_bdev_io_submit_status == 0) {
348 		set_io_output(output, desc, ch, 0, 0, cb, cb_arg, SPDK_BDEV_IO_TYPE_RESET);
349 		g_io_output_index++;
350 
351 		child_io = calloc(1, sizeof(struct spdk_bdev_io));
352 		SPDK_CU_ASSERT_FATAL(child_io != NULL);
353 		cb(child_io, g_child_io_status_flag, cb_arg);
354 	}
355 
356 	return g_bdev_io_submit_status;
357 }
358 
359 int
360 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
361 		       uint64_t offset_blocks, uint64_t num_blocks,
362 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
363 {
364 	struct io_output *output = &g_io_output[g_io_output_index];
365 	struct spdk_bdev_io *child_io;
366 
367 	if (g_ignore_io_output) {
368 		return 0;
369 	}
370 
371 	if (g_bdev_io_submit_status == 0) {
372 		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
373 			      SPDK_BDEV_IO_TYPE_UNMAP);
374 		g_io_output_index++;
375 
376 		child_io = calloc(1, sizeof(struct spdk_bdev_io));
377 		SPDK_CU_ASSERT_FATAL(child_io != NULL);
378 		cb(child_io, g_child_io_status_flag, cb_arg);
379 	}
380 
381 	return g_bdev_io_submit_status;
382 }
383 
/* Stub: a destruct must succeed and report back through the unregister
 * callback captured earlier by spdk_bdev_unregister().
 */
void
spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
{
	CU_ASSERT(bdeverrno == 0);
	SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL);
	bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
}
391 
/* Stub unregister: invoke the module's destruct directly and poll the test
 * threads until the asynchronous destruction completes.
 */
void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int ret;

	bdev->internal.unregister_cb = cb_fn;
	bdev->internal.unregister_ctx = cb_arg;

	ret = bdev->fn_table->destruct(bdev->ctxt);
	/* raid bdev destruct is expected to complete asynchronously (returns 1). */
	CU_ASSERT(ret == 1);

	poll_threads();
}
405 
406 int
407 spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
408 		   void *event_ctx, struct spdk_bdev_desc **_desc)
409 {
410 	struct spdk_bdev *bdev;
411 
412 	bdev = spdk_bdev_get_by_name(bdev_name);
413 	if (bdev == NULL) {
414 		return -ENODEV;
415 	}
416 
417 	*_desc = (void *)bdev;
418 	return 0;
419 }
420 
/* Stub: in these tests a descriptor is the bdev pointer in disguise. */
struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (void *)desc;
}
426 
427 int
428 spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
429 {
430 	if (!g_test_multi_raids) {
431 		struct rpc_bdev_raid_create *req = g_rpc_req;
432 		if (strcmp(name, "strip_size_kb") == 0) {
433 			CU_ASSERT(req->strip_size_kb == val);
434 		} else if (strcmp(name, "blocklen_shift") == 0) {
435 			CU_ASSERT(spdk_u32log2(g_block_len) == val);
436 		} else if (strcmp(name, "num_base_bdevs") == 0) {
437 			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
438 		} else if (strcmp(name, "state") == 0) {
439 			CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
440 		} else if (strcmp(name, "destruct_called") == 0) {
441 			CU_ASSERT(val == 0);
442 		} else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
443 			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
444 		}
445 	}
446 	return 0;
447 }
448 
449 int
450 spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
451 {
452 	if (g_test_multi_raids) {
453 		if (strcmp(name, "name") == 0) {
454 			g_get_raids_output[g_get_raids_count] = strdup(val);
455 			SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
456 			g_get_raids_count++;
457 		}
458 	} else {
459 		struct rpc_bdev_raid_create *req = g_rpc_req;
460 		if (strcmp(name, "raid_level") == 0) {
461 			CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0);
462 		}
463 	}
464 	return 0;
465 }
466 
467 int
468 spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val)
469 {
470 	if (!g_test_multi_raids) {
471 		struct rpc_bdev_raid_create *req = g_rpc_req;
472 		if (strcmp(name, "superblock") == 0) {
473 			CU_ASSERT(val == req->superblock_enabled);
474 		}
475 	}
476 	return 0;
477 }
478 
/* Stub: release a fabricated child IO; free(NULL) is a no-op so no guard is needed. */
void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}
486 
487 /* It will cache split IOs for verification */
488 int
489 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
490 		       struct iovec *iov, int iovcnt,
491 		       uint64_t offset_blocks, uint64_t num_blocks,
492 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
493 {
494 	struct io_output *output = &g_io_output[g_io_output_index];
495 	struct spdk_bdev_io *child_io;
496 
497 	if (g_ignore_io_output) {
498 		return 0;
499 	}
500 
501 	SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
502 	if (g_bdev_io_submit_status == 0) {
503 		set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
504 			      SPDK_BDEV_IO_TYPE_READ);
505 		g_io_output_index++;
506 
507 		child_io = calloc(1, sizeof(struct spdk_bdev_io));
508 		SPDK_CU_ASSERT_FATAL(child_io != NULL);
509 		cb(child_io, g_child_io_status_flag, cb_arg);
510 	}
511 
512 	return g_bdev_io_submit_status;
513 }
514 
/* Stub: the extended options are ignored; forward to the plain read stub. */
int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt,
			   uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}
524 
/* Stub: metadata is ignored; forward to the plain read stub. */
int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc,	struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks(desc, ch, iov, iovcnt, offset_blocks, num_blocks, cb, cb_arg);
}
533 
534 
/* Stub: releasing a claim requires an existing exclusive-write claim; clear it. */
void
spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
{
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module != NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
	bdev->internal.claim.v1.module = NULL;
}
543 
/* Stub: grant an exclusive-write claim unless the bdev is already claimed. */
int
spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_bdev_module *module)
{
	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
		/* Already claimed: a claiming module must be recorded. */
		CU_ASSERT(bdev->internal.claim.v1.module != NULL);
		return -1;
	}
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
	bdev->internal.claim.v1.module = module;
	return 0;
}
557 
/* Stub JSON object decode; behavior is steered by test globals:
 * - g_json_decode_obj_err: simulate a decode failure.
 * - g_json_decode_obj_create: deep-copy the prepared create request into 'out'
 *   (name and base bdev names are strdup'd; the real decoder also allocates).
 * - otherwise: raw memcpy of the prepared request (non-create RPCs).
 */
int
spdk_json_decode_object(const struct spdk_json_val *values,
			const struct spdk_json_object_decoder *decoders, size_t num_decoders,
			void *out)
{
	struct rpc_bdev_raid_create *req, *_out;
	size_t i;

	if (g_json_decode_obj_err) {
		return -1;
	} else if (g_json_decode_obj_create) {
		req = g_rpc_req;
		_out = out;

		_out->name = strdup(req->name);
		SPDK_CU_ASSERT_FATAL(_out->name != NULL);
		_out->strip_size_kb = req->strip_size_kb;
		_out->level = req->level;
		_out->superblock_enabled = req->superblock_enabled;
		_out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
		for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
			_out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
			SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]);
		}
	} else {
		memcpy(out, g_rpc_req, g_rpc_req_size);
	}

	return 0;
}
588 
/* Stub: return a non-NULL dummy write context; the write stubs never dereference it. */
struct spdk_json_write_ctx *
spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
{
	return (void *)1;
}
594 
/* Stub: record that an RPC error response was sent. */
void
spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
				 int error_code, const char *msg)
{
	g_rpc_err = 1;
}
601 
/* Stub: record that a formatted RPC error response was sent. */
void
spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
				     int error_code, const char *fmt, ...)
{
	g_rpc_err = 1;
}
608 
609 struct spdk_bdev *
610 spdk_bdev_get_by_name(const char *bdev_name)
611 {
612 	struct spdk_bdev *bdev;
613 
614 	if (!TAILQ_EMPTY(&g_bdev_list)) {
615 		TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
616 			if (strcmp(bdev_name, bdev->name) == 0) {
617 				return bdev;
618 			}
619 		}
620 	}
621 
622 	return NULL;
623 }
624 
/* Stub: complete the quiesce synchronously with success. */
int
spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}
635 
/* Stub: complete the unquiesce synchronously with success. */
int
spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		    spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}
646 
647 static void
648 bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
649 {
650 	if (bdev_io->u.bdev.iovs) {
651 		if (bdev_io->u.bdev.iovs->iov_base) {
652 			free(bdev_io->u.bdev.iovs->iov_base);
653 		}
654 		free(bdev_io->u.bdev.iovs);
655 	}
656 	free(bdev_io);
657 }
658 
/* Fill a bdev_io as the upper layer would for the given LBA range and IO type.
 * IO types that carry a payload get a single iovec backed by a zeroed buffer
 * (released later by bdev_io_cleanup()).
 */
static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;

	/* UNMAP/FLUSH carry no payload. NOTE(review): internal.ch is also left
	 * unset on this path — the tests appear not to rely on it for these types.
	 */
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
		return;
	}

	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
	bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * g_block_len);
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
	bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * g_block_len;
	bdev_io->internal.ch = channel;
}
682 
/* Verify that a reset raid IO fanned out to exactly one reset per base drive,
 * each on the matching per-drive channel and descriptor, and that the raid IO
 * completed with the expected status.
 */
static void
verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
		struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint8_t index = 0;
	struct io_output *output;

	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(io_status != INVALID_IO_SUBMIT);
	SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);

	/* One captured submission per base drive, in drive order. */
	CU_ASSERT(g_io_output_index == num_base_drives);
	for (index = 0; index < g_io_output_index; index++) {
		output = &g_io_output[index];
		CU_ASSERT(ch_ctx->base_channel[index] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}
704 
/* Verify the strip-by-strip split of a raid0 read/write IO: for every strip the
 * raid IO touches, recompute the expected base-drive index, LBA and block count
 * and compare them against the captured submissions in g_io_output.
 */
static void
verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
	  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint32_t splits_reqd = (end_strip - start_strip + 1);
	uint32_t strip;
	uint64_t pd_strip;
	uint8_t pd_idx;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint32_t index = 0;
	struct io_output *output;

	/* An IO that failed submission must have been completed with failure. */
	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);

	/* One captured base IO per strip crossed. */
	CU_ASSERT(splits_reqd == g_io_output_index);
	for (strip = start_strip; strip <= end_strip; strip++, index++) {
		/* raid0 round-robin: strip N lands on drive N % num_drives. */
		pd_strip = strip / num_base_drives;
		pd_idx = strip % num_base_drives;
		if (strip == start_strip) {
			/* First strip may start mid-strip. */
			offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
			pd_lba = (pd_strip << strip_shift) + offset_in_strip;
			if (strip == end_strip) {
				pd_blocks = bdev_io->u.bdev.num_blocks;
			} else {
				pd_blocks = g_strip_size - offset_in_strip;
			}
		} else if (strip == end_strip) {
			/* Last strip may end mid-strip. */
			pd_lba = pd_strip << strip_shift;
			pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
				     (g_strip_size - 1)) + 1;
		} else {
			/* Interior strips are always full. */
			pd_lba = pd_strip << raid_bdev->strip_size_shift;
			pd_blocks = raid_bdev->strip_size;
		}
		output = &g_io_output[index];
		CU_ASSERT(pd_lba == output->offset_blocks);
		CU_ASSERT(pd_blocks == output->num_blocks);
		CU_ASSERT(ch_ctx->base_channel[pd_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}
759 
/* Verify the split of a payload-less raid0 IO (UNMAP/FLUSH). Unlike reads and
 * writes, consecutive strips on the same drive are merged into one base IO, so
 * the checks here validate per-drive alignment and coverage rather than exact
 * per-strip LBAs.
 */
static void
verify_io_without_payload(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
			  struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev,
			  uint32_t io_status)
{
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_offset_in_strip = bdev_io->u.bdev.offset_blocks % g_strip_size;
	uint64_t end_offset_in_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) %
				       g_strip_size;
	uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
	uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
			     strip_shift;
	uint8_t n_disks_involved;
	uint64_t start_strip_disk_idx;
	uint64_t end_strip_disk_idx;
	uint64_t nblocks_in_start_disk;
	uint64_t offset_in_start_disk;
	uint8_t disk_idx;
	uint64_t base_io_idx;
	uint64_t sum_nblocks = 0;
	struct io_output *output;

	/* An IO that failed submission must have been completed with failure. */
	if (io_status == INVALID_IO_SUBMIT) {
		CU_ASSERT(g_io_comp_status == false);
		return;
	}
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_READ);
	SPDK_CU_ASSERT_FATAL(bdev_io->type != SPDK_BDEV_IO_TYPE_WRITE);

	/* At most one merged base IO per drive. */
	n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
	CU_ASSERT(n_disks_involved == g_io_output_index);

	start_strip_disk_idx = start_strip % num_base_drives;
	end_strip_disk_idx = end_strip % num_base_drives;

	offset_in_start_disk = g_io_output[0].offset_blocks;
	nblocks_in_start_disk = g_io_output[0].num_blocks;

	for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
	     base_io_idx++, disk_idx++) {
		uint64_t start_offset_in_disk;
		uint64_t end_offset_in_disk;

		output = &g_io_output[base_io_idx];

		/* round disk_idx */
		if (disk_idx >= num_base_drives) {
			disk_idx %= num_base_drives;
		}

		/* start_offset_in_disk aligned in strip check:
		 * The first base io has a same start_offset_in_strip with the whole raid io.
		 * Other base io should have aligned start_offset_in_strip which is 0.
		 */
		start_offset_in_disk = output->offset_blocks;
		if (base_io_idx == 0) {
			CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
		} else {
			CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
		}

		/* end_offset_in_disk aligned in strip check:
		 * Base io on disk at which end_strip is located, has a same end_offset_in_strip
		 * with the whole raid io.
		 * Other base io should have aligned end_offset_in_strip.
		 */
		end_offset_in_disk = output->offset_blocks + output->num_blocks - 1;
		if (disk_idx == end_strip_disk_idx) {
			CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
		} else {
			CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
		}

		/* start_offset_in_disk compared with start_disk.
		 * 1. For disk_idx which is larger than start_strip_disk_idx: Its start_offset_in_disk
		 *    mustn't be larger than the start offset of start_offset_in_disk; And the gap
		 *    must be less than strip size.
		 * 2. For disk_idx which is less than start_strip_disk_idx, Its start_offset_in_disk
		 *    must be larger than the start offset of start_offset_in_disk; And the gap mustn't
		 *    be less than strip size.
		 */
		if (disk_idx > start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
			CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
		} else if (disk_idx < start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
			CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size);
		}

		/* nblocks compared with start_disk:
		 * The gap between them must be within a strip size.
		 */
		if (output->num_blocks <= nblocks_in_start_disk) {
			CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size);
		} else {
			CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size);
		}

		sum_nblocks += output->num_blocks;

		CU_ASSERT(ch_ctx->base_channel[disk_idx] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}

	/* Sum of each nblocks should be same with raid bdev_io */
	CU_ASSERT(bdev_io->u.bdev.num_blocks == sum_nblocks);

	CU_ASSERT(g_io_comp_status == io_status);
}
872 
873 static void
874 verify_raid_bdev_present(const char *name, bool presence)
875 {
876 	struct raid_bdev *pbdev;
877 	bool   pbdev_found;
878 
879 	pbdev_found = false;
880 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
881 		if (strcmp(pbdev->bdev.name, name) == 0) {
882 			pbdev_found = true;
883 			break;
884 		}
885 	}
886 	if (presence == true) {
887 		CU_ASSERT(pbdev_found == true);
888 	} else {
889 		CU_ASSERT(pbdev_found == false);
890 	}
891 }
892 
/* Locate the raid bdev named in the create request and, when 'presence' is
 * expected, validate its entire configured state against the request: geometry
 * (strip size, block length, blockcnt derived from the smallest base bdev),
 * base bdev bookkeeping, state, and registration fields.
 */
static void
verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state)
{
	struct raid_bdev *pbdev;
	struct raid_base_bdev_info *base_info;
	struct spdk_bdev *bdev = NULL;
	bool   pbdev_found;
	uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF;

	pbdev_found = false;
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, r->name) == 0) {
			pbdev_found = true;
			if (presence == false) {
				break;
			}
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len));
			CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) /
					g_block_len)));
			CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len));
			CU_ASSERT((uint32_t)pbdev->state == raid_state);
			CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
			CU_ASSERT(pbdev->level == r->level);
			CU_ASSERT(pbdev->base_bdev_info != NULL);
			RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) {
				CU_ASSERT(base_info->desc != NULL);
				bdev = spdk_bdev_desc_get_bdev(base_info->desc);
				CU_ASSERT(bdev != NULL);
				CU_ASSERT(base_info->remove_scheduled == false);
				/* A superblock, when present, occupies a non-zero data offset. */
				CU_ASSERT((pbdev->sb != NULL && base_info->data_offset != 0) ||
					  (pbdev->sb == NULL && base_info->data_offset == 0));
				CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt);

				if (bdev && base_info->data_size < min_blockcnt) {
					min_blockcnt = base_info->data_size;
				}
			}
			/* raid0 capacity: smallest data size rounded down to whole strips, times drives. */
			CU_ASSERT((((min_blockcnt / (r->strip_size_kb * 1024 / g_block_len)) *
				    (r->strip_size_kb * 1024 / g_block_len)) *
				   r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
			CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0);
			CU_ASSERT(pbdev->bdev.write_cache == 0);
			CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
			if (pbdev->num_base_bdevs > 1) {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
			} else {
				CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
				CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
			}
			CU_ASSERT(pbdev->bdev.ctxt == pbdev);
			CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
			CU_ASSERT(pbdev->bdev.module == &g_raid_if);
			break;
		}
	}
	if (presence == true) {
		CU_ASSERT(pbdev_found == true);
	} else {
		CU_ASSERT(pbdev_found == false);
	}
}
957 
958 static void
959 verify_get_raids(struct rpc_bdev_raid_create *construct_req,
960 		 uint8_t g_max_raids,
961 		 char **g_get_raids_output, uint32_t g_get_raids_count)
962 {
963 	uint8_t i, j;
964 	bool found;
965 
966 	CU_ASSERT(g_max_raids == g_get_raids_count);
967 	if (g_max_raids == g_get_raids_count) {
968 		for (i = 0; i < g_max_raids; i++) {
969 			found = false;
970 			for (j = 0; j < g_max_raids; j++) {
971 				if (construct_req[i].name &&
972 				    strcmp(construct_req[i].name, g_get_raids_output[i]) == 0) {
973 					found = true;
974 					break;
975 				}
976 			}
977 			CU_ASSERT(found == true);
978 		}
979 	}
980 }
981 
982 static void
983 create_base_bdevs(uint32_t bbdev_start_idx)
984 {
985 	uint8_t i;
986 	struct spdk_bdev *base_bdev;
987 	char name[16];
988 
989 	for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
990 		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
991 		base_bdev = calloc(1, sizeof(struct spdk_bdev));
992 		SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
993 		base_bdev->name = strdup(name);
994 		spdk_uuid_generate(&base_bdev->uuid);
995 		SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
996 		base_bdev->blocklen = g_block_len;
997 		base_bdev->blockcnt = BLOCK_CNT;
998 		TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
999 	}
1000 }
1001 
1002 static void
1003 create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name,
1004 		uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled)
1005 {
1006 	uint8_t i;
1007 	char name[16];
1008 	uint8_t bbdev_idx = bbdev_start_idx;
1009 
1010 	r->name = strdup(raid_name);
1011 	SPDK_CU_ASSERT_FATAL(r->name != NULL);
1012 	r->strip_size_kb = (g_strip_size * g_block_len) / 1024;
1013 	r->level = RAID0;
1014 	r->superblock_enabled = superblock_enabled;
1015 	r->base_bdevs.num_base_bdevs = g_max_base_drives;
1016 	for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
1017 		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
1018 		r->base_bdevs.base_bdevs[i] = strdup(name);
1019 		SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
1020 	}
1021 	if (create_base_bdev == true) {
1022 		create_base_bdevs(bbdev_start_idx);
1023 	}
1024 	g_rpc_req = r;
1025 	g_rpc_req_size = sizeof(*r);
1026 }
1027 
1028 static void
1029 create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name,
1030 			    uint8_t bbdev_start_idx, bool create_base_bdev,
1031 			    uint8_t json_decode_obj_err, bool superblock_enabled)
1032 {
1033 	create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock_enabled);
1034 
1035 	g_rpc_err = 0;
1036 	g_json_decode_obj_create = 1;
1037 	g_json_decode_obj_err = json_decode_obj_err;
1038 	g_config_level_create = 0;
1039 	g_test_multi_raids = 0;
1040 }
1041 
1042 static void
1043 free_test_req(struct rpc_bdev_raid_create *r)
1044 {
1045 	uint8_t i;
1046 
1047 	free(r->name);
1048 	for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
1049 		free(r->base_bdevs.base_bdevs[i]);
1050 	}
1051 }
1052 
1053 static void
1054 create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name,
1055 			    uint8_t json_decode_obj_err)
1056 {
1057 	r->name = strdup(raid_name);
1058 	SPDK_CU_ASSERT_FATAL(r->name != NULL);
1059 
1060 	g_rpc_req = r;
1061 	g_rpc_req_size = sizeof(*r);
1062 	g_rpc_err = 0;
1063 	g_json_decode_obj_create = 0;
1064 	g_json_decode_obj_err = json_decode_obj_err;
1065 	g_config_level_create = 0;
1066 	g_test_multi_raids = 0;
1067 }
1068 
1069 static void
1070 create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category,
1071 		     uint8_t json_decode_obj_err)
1072 {
1073 	r->category = strdup(category);
1074 	SPDK_CU_ASSERT_FATAL(r->category != NULL);
1075 
1076 	g_rpc_req = r;
1077 	g_rpc_req_size = sizeof(*r);
1078 	g_rpc_err = 0;
1079 	g_json_decode_obj_create = 0;
1080 	g_json_decode_obj_err = json_decode_obj_err;
1081 	g_config_level_create = 0;
1082 	g_test_multi_raids = 1;
1083 	g_get_raids_count = 0;
1084 }
1085 
/* Happy path: create a raid bdev over fresh base bdevs via RPC, verify it
 * comes up ONLINE, then delete it and tear down test state.
 */
static void
test_create_raid(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1109 
/* Create a raid bdev, delete it via RPC, and verify it is actually gone from
 * the global raid list afterwards.
 */
static void
test_delete_raid(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	/* deletion must remove the bdev from g_raid_bdev_list */
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1135 
/* Exercise rpc_bdev_raid_create error paths: invalid raid level, JSON decode
 * failure, invalid strip size, duplicate raid name, base bdevs already
 * claimed by another raid, and creation referencing a not-yet-present bdev.
 */
static void
test_create_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *raid_bdev;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	/* invalid raid level is rejected */
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	req.level = INVALID_RAID_LEVEL;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	/* simulated JSON decode failure is rejected */
	create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	/* strip_size_kb = 1231 (invalid strip size) is rejected */
	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	req.strip_size_kb = 1231;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	/* valid request succeeds and raid1 comes online */
	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	/* duplicate raid name is rejected */
	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);

	/* base bdevs already claimed by raid1 are rejected for raid2 */
	create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	/* a single base bdev (Nvme0n1) owned by raid1 is enough to reject */
	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	/* referencing a bdev that does not exist yet still succeeds */
	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	raid_bdev = raid_bdev_find_by_name("raid2");
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	check_and_remove_raid_bdev(raid_bdev);

	/* recreate raid2 on its own (now existing) base bdevs */
	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1220 
/* Exercise rpc_bdev_raid_delete error paths: deleting an unknown raid name
 * and a simulated JSON decode failure, then a successful delete.
 */
static void
test_delete_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete destroy_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	/* deleting a raid that does not exist fails */
	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);

	/* JSON decode failure: the RPC bails out before consuming the request,
	 * so the name must be freed manually here
	 */
	create_raid_bdev_delete_req(&destroy_req, "raid1", 1);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(destroy_req.name);
	verify_raid_bdev_present("raid1", true);

	/* valid delete succeeds */
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1256 
/* Create a raid bdev, obtain its IO channel, and verify that the channel
 * context holds one base channel (the stub g_io_channel) per base bdev.
 */
static void
test_io_channel(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	uint8_t i;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	/* look up the raid_bdev that the RPC registered */
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* every base bdev slot must point at the mocked IO channel */
	for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
		CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == &g_io_channel);
	}
	free_test_req(&req);

	spdk_put_io_channel(ch);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1305 
/* Submit write IOs of half-strip and full-strip size to a raid bdev and
 * verify (via the captured g_io_output) how they were routed to base bdevs.
 */
static void
test_write_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	uint8_t i;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba = 0;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* test 2 IO sizes based on global strip size set earlier */
	for (i = 0; i < 2; i++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		/* i=0: half a strip; i=1: a full strip */
		io_len = (g_strip_size / 2) << i;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		/* clear the capture buffer before each submission */
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1366 
/* Submit read IOs of half-strip and full-strip size to a raid bdev and
 * verify (via the captured g_io_output) how they were routed to base bdevs.
 * Mirrors test_write_io but with SPDK_BDEV_IO_TYPE_READ.
 */
static void
test_read_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	uint8_t i;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* test 2 IO sizes based on global strip size set earlier */
	lba = 0;
	for (i = 0; i < 2; i++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		/* i=0: half a strip; i=1: a full strip */
		io_len = (g_strip_size / 2) << i;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
		lba += g_strip_size;
		/* clear the capture buffer before each submission */
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1428 
1429 static void
1430 raid_bdev_io_generate_by_strips(uint64_t n_strips)
1431 {
1432 	uint64_t lba;
1433 	uint64_t nblocks;
1434 	uint64_t start_offset;
1435 	uint64_t end_offset;
1436 	uint64_t offsets_in_strip[3];
1437 	uint64_t start_bdev_idx;
1438 	uint64_t start_bdev_offset;
1439 	uint64_t start_bdev_idxs[3];
1440 	int i, j, l;
1441 
1442 	/* 3 different situations of offset in strip */
1443 	offsets_in_strip[0] = 0;
1444 	offsets_in_strip[1] = g_strip_size >> 1;
1445 	offsets_in_strip[2] = g_strip_size - 1;
1446 
1447 	/* 3 different situations of start_bdev_idx */
1448 	start_bdev_idxs[0] = 0;
1449 	start_bdev_idxs[1] = g_max_base_drives >> 1;
1450 	start_bdev_idxs[2] = g_max_base_drives - 1;
1451 
1452 	/* consider different offset in strip */
1453 	for (i = 0; i < 3; i++) {
1454 		start_offset = offsets_in_strip[i];
1455 		for (j = 0; j < 3; j++) {
1456 			end_offset = offsets_in_strip[j];
1457 			if (n_strips == 1 && start_offset > end_offset) {
1458 				continue;
1459 			}
1460 
1461 			/* consider at which base_bdev lba is started. */
1462 			for (l = 0; l < 3; l++) {
1463 				start_bdev_idx = start_bdev_idxs[l];
1464 				start_bdev_offset = start_bdev_idx * g_strip_size;
1465 				lba = g_lba_offset + start_bdev_offset + start_offset;
1466 				nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;
1467 
1468 				g_io_ranges[g_io_range_idx].lba = lba;
1469 				g_io_ranges[g_io_range_idx].nblocks = nblocks;
1470 
1471 				SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
1472 				g_io_range_idx++;
1473 			}
1474 		}
1475 	}
1476 }
1477 
1478 static void
1479 raid_bdev_io_generate(void)
1480 {
1481 	uint64_t n_strips;
1482 	uint64_t n_strips_span = g_max_base_drives;
1483 	uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1,
1484 				      g_max_base_drives * 2, g_max_base_drives * 3,
1485 				      g_max_base_drives * 4
1486 				     };
1487 	uint32_t i;
1488 
1489 	g_io_range_idx = 0;
1490 
1491 	/* consider different number of strips from 1 to strips spanned base bdevs,
1492 	 * and even to times of strips spanned base bdevs
1493 	 */
1494 	for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
1495 		raid_bdev_io_generate_by_strips(n_strips);
1496 	}
1497 
1498 	for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
1499 		n_strips = n_strips_times[i];
1500 		raid_bdev_io_generate_by_strips(n_strips);
1501 	}
1502 }
1503 
/* Submit UNMAP IOs over a generated set of LBA ranges and verify each is
 * split correctly across base bdevs (payload-less verification).
 */
static void
test_unmap_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;
	uint32_t count;
	uint64_t io_len;
	uint64_t lba;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_FLUSH) == true);

	/* build the LBA range table, then submit one UNMAP per range */
	raid_bdev_io_generate();
	for (count = 0; count < g_io_range_idx; count++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = g_io_ranges[count].nblocks;
		lba = g_io_ranges[count].lba;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
		/* clear the capture buffer before each submission */
		memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io_without_payload(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
					  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1567 
/* Test IO failures: first submit an IO of an invalid type (expected to be
 * rejected at submit time, status INVALID_IO_SUBMIT), then submit a write
 * while the mocked child IOs are flagged to fail.
 */
static void
test_io_failure(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;
	uint32_t count;
	uint64_t io_len;
	uint64_t lba;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, req.name) == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* invalid IO type: submission itself must fail */
	lba = 0;
	for (count = 0; count < 1; count++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << count;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  INVALID_IO_SUBMIT);
		bdev_io_cleanup(bdev_io);
	}


	/* valid write, but mocked child IOs are set to complete with failure */
	lba = 0;
	g_child_io_status_flag = false;
	for (count = 0; count < 1; count++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		io_len = (g_strip_size / 2) << count;
		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
	}

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1646 
/* Test reset IO: submit a RESET to the raid bdev and verify it fans out to
 * every base bdev with a successful completion.
 */
static void
test_reset_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	/* mocked child submissions succeed and complete successfully */
	g_bdev_io_submit_status = 0;
	g_child_io_status_flag = true;

	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
	memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
	g_io_output_index = 0;
	raid_bdev_submit_request(ch, bdev_io);
	verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			true);
	bdev_io_cleanup(bdev_io);

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}
1705 
/* Create multiple raids, destroy raids without IO, get_raids related tests:
 * exercise rpc_bdev_raid_get_bdevs for every category ("all", "online",
 * "configuring", "offline"), an invalid category, and a JSON decode failure.
 */
static void
test_multi_raid_no_io(void)
{
	struct rpc_bdev_raid_create *construct_req;
	struct rpc_bdev_raid_delete destroy_req;
	struct rpc_bdev_raid_get_bdevs get_raids_req;
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = 0;

	set_globals();
	construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
	CU_ASSERT(raid_bdev_init() == 0);
	/* create g_max_raids raids, each over its own slice of base bdevs */
	for (i = 0; i < g_max_raids; i++) {
		snprintf(name, 16, "%s%u", "raid", i);
		verify_raid_bdev_present(name, false);
		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
		bbdev_idx += g_max_base_drives;
		rpc_bdev_raid_create(NULL, NULL);
		CU_ASSERT(g_rpc_err == 0);
		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
	}

	/* "all" reports every created raid */
	create_get_raids_req(&get_raids_req, "all", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	/* "online" also reports every raid (all are ONLINE here) */
	create_get_raids_req(&get_raids_req, "online", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	/* no raid is in "configuring" or "offline" state */
	create_get_raids_req(&get_raids_req, "configuring", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "offline", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == 0);

	/* unknown category is an RPC error */
	create_get_raids_req(&get_raids_req, "invalid_category", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	CU_ASSERT(g_get_raids_count == 0);

	/* JSON decode failure: the category string is not consumed, free it here */
	create_get_raids_req(&get_raids_req, "all", 1);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(get_raids_req.category);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "all", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == g_max_raids);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	/* delete every raid and verify it disappears */
	for (i = 0; i < g_max_raids; i++) {
		SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
		snprintf(name, 16, "%s", construct_req[i].name);
		create_raid_bdev_delete_req(&destroy_req, name, 0);
		rpc_bdev_raid_delete(NULL, NULL);
		CU_ASSERT(g_rpc_err == 0);
		verify_raid_bdev_present(name, false);
	}
	raid_bdev_exit();
	for (i = 0; i < g_max_raids; i++) {
		free_test_req(&construct_req[i]);
	}
	free(construct_req);
	base_bdevs_cleanup();
	reset_globals();
}
1792 
1793 /* Create multiple raids, fire IOs on raids */
1794 static void
1795 test_multi_raid_with_io(void)
1796 {
1797 	struct rpc_bdev_raid_create *construct_req;
1798 	struct rpc_bdev_raid_delete destroy_req;
1799 	uint8_t i;
1800 	char name[16];
1801 	uint8_t bbdev_idx = 0;
1802 	struct raid_bdev *pbdev;
1803 	struct spdk_io_channel **channels;
1804 	struct spdk_bdev_io *bdev_io;
1805 	uint64_t io_len;
1806 	uint64_t lba = 0;
1807 	int16_t iotype;
1808 
1809 	set_globals();
1810 	construct_req = calloc(g_max_raids, sizeof(struct rpc_bdev_raid_create));
1811 	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
1812 	CU_ASSERT(raid_bdev_init() == 0);
1813 	channels = calloc(g_max_raids, sizeof(*channels));
1814 	SPDK_CU_ASSERT_FATAL(channels != NULL);
1815 
1816 	for (i = 0; i < g_max_raids; i++) {
1817 		snprintf(name, 16, "%s%u", "raid", i);
1818 		verify_raid_bdev_present(name, false);
1819 		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
1820 		bbdev_idx += g_max_base_drives;
1821 		rpc_bdev_raid_create(NULL, NULL);
1822 		CU_ASSERT(g_rpc_err == 0);
1823 		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
1824 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1825 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
1826 				break;
1827 			}
1828 		}
1829 		CU_ASSERT(pbdev != NULL);
1830 
1831 		channels[i] = spdk_get_io_channel(pbdev);
1832 		SPDK_CU_ASSERT_FATAL(channels[i] != NULL);
1833 	}
1834 
1835 	/* This will perform a write on the first raid and a read on the second. It can be
1836 	 * expanded in the future to perform r/w on each raid device in the event that
1837 	 * multiple raid levels are supported.
1838 	 */
1839 	for (i = 0; i < g_max_raids; i++) {
1840 		struct spdk_io_channel *ch = channels[i];
1841 		struct raid_bdev_io_channel *ch_ctx = spdk_io_channel_get_ctx(ch);
1842 
1843 		SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
1844 		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
1845 		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
1846 		io_len = g_strip_size;
1847 		iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
1848 		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
1849 		g_io_output_index = 0;
1850 		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1851 			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
1852 				break;
1853 			}
1854 		}
1855 		bdev_io_initialize(bdev_io, ch, &pbdev->bdev, lba, io_len, iotype);
1856 		CU_ASSERT(pbdev != NULL);
1857 		raid_bdev_submit_request(ch, bdev_io);
1858 		verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
1859 			  g_child_io_status_flag);
1860 		bdev_io_cleanup(bdev_io);
1861 	}
1862 
1863 	for (i = 0; i < g_max_raids; i++) {
1864 		spdk_put_io_channel(channels[i]);
1865 		snprintf(name, 16, "%s", construct_req[i].name);
1866 		create_raid_bdev_delete_req(&destroy_req, name, 0);
1867 		rpc_bdev_raid_delete(NULL, NULL);
1868 		CU_ASSERT(g_rpc_err == 0);
1869 		verify_raid_bdev_present(name, false);
1870 	}
1871 	raid_bdev_exit();
1872 	for (i = 0; i < g_max_raids; i++) {
1873 		free_test_req(&construct_req[i]);
1874 	}
1875 	free(construct_req);
1876 	free(channels);
1877 	base_bdevs_cleanup();
1878 	reset_globals();
1879 }
1880 
1881 static void
1882 test_io_type_supported(void)
1883 {
1884 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
1885 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
1886 	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
1887 }
1888 
1889 static void
1890 test_raid_json_dump_info(void)
1891 {
1892 	struct rpc_bdev_raid_create req;
1893 	struct rpc_bdev_raid_delete destroy_req;
1894 	struct raid_bdev *pbdev;
1895 
1896 	set_globals();
1897 	CU_ASSERT(raid_bdev_init() == 0);
1898 
1899 	verify_raid_bdev_present("raid1", false);
1900 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
1901 	rpc_bdev_raid_create(NULL, NULL);
1902 	CU_ASSERT(g_rpc_err == 0);
1903 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1904 
1905 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
1906 		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
1907 			break;
1908 		}
1909 	}
1910 	CU_ASSERT(pbdev != NULL);
1911 
1912 	CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
1913 
1914 	free_test_req(&req);
1915 
1916 	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
1917 	rpc_bdev_raid_delete(NULL, NULL);
1918 	CU_ASSERT(g_rpc_err == 0);
1919 	verify_raid_bdev_present("raid1", false);
1920 
1921 	raid_bdev_exit();
1922 	base_bdevs_cleanup();
1923 	reset_globals();
1924 }
1925 
1926 static void
1927 test_context_size(void)
1928 {
1929 	CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
1930 }
1931 
1932 static void
1933 test_raid_level_conversions(void)
1934 {
1935 	const char *raid_str;
1936 
1937 	CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL);
1938 	CU_ASSERT(raid_bdev_str_to_level("0") == RAID0);
1939 	CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0);
1940 	CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0);
1941 
1942 	raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
1943 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
1944 	raid_str = raid_bdev_level_to_str(1234);
1945 	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
1946 	raid_str = raid_bdev_level_to_str(RAID0);
1947 	CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
1948 }
1949 
1950 static void
1951 test_create_raid_superblock(void)
1952 {
1953 	struct rpc_bdev_raid_create req;
1954 	struct rpc_bdev_raid_delete delete_req;
1955 
1956 	set_globals();
1957 	CU_ASSERT(raid_bdev_init() == 0);
1958 
1959 	verify_raid_bdev_present("raid1", false);
1960 	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true);
1961 	rpc_bdev_raid_create(NULL, NULL);
1962 	CU_ASSERT(g_rpc_err == 0);
1963 	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
1964 	free_test_req(&req);
1965 
1966 	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
1967 	rpc_bdev_raid_delete(NULL, NULL);
1968 	CU_ASSERT(g_rpc_err == 0);
1969 	raid_bdev_exit();
1970 	base_bdevs_cleanup();
1971 	reset_globals();
1972 
1973 }
1974 
1975 int
1976 main(int argc, char **argv)
1977 {
1978 	CU_pSuite       suite = NULL;
1979 	unsigned int    num_failures;
1980 
1981 	CU_initialize_registry();
1982 
1983 	suite = CU_add_suite("raid", NULL, NULL);
1984 
1985 	CU_ADD_TEST(suite, test_create_raid);
1986 	CU_ADD_TEST(suite, test_create_raid_superblock);
1987 	CU_ADD_TEST(suite, test_delete_raid);
1988 	CU_ADD_TEST(suite, test_create_raid_invalid_args);
1989 	CU_ADD_TEST(suite, test_delete_raid_invalid_args);
1990 	CU_ADD_TEST(suite, test_io_channel);
1991 	CU_ADD_TEST(suite, test_reset_io);
1992 	CU_ADD_TEST(suite, test_write_io);
1993 	CU_ADD_TEST(suite, test_read_io);
1994 	CU_ADD_TEST(suite, test_unmap_io);
1995 	CU_ADD_TEST(suite, test_io_failure);
1996 	CU_ADD_TEST(suite, test_multi_raid_no_io);
1997 	CU_ADD_TEST(suite, test_multi_raid_with_io);
1998 	CU_ADD_TEST(suite, test_io_type_supported);
1999 	CU_ADD_TEST(suite, test_raid_json_dump_info);
2000 	CU_ADD_TEST(suite, test_context_size);
2001 	CU_ADD_TEST(suite, test_raid_level_conversions);
2002 
2003 	allocate_threads(1);
2004 	set_thread(0);
2005 
2006 	set_test_opts();
2007 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2008 	CU_cleanup_registry();
2009 
2010 	free_threads();
2011 
2012 	return num_failures;
2013 }
2014