/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "bdev/raid/bdev_raid.c"
#include "bdev/raid/bdev_raid_rpc.c"
#include "common/lib/test_env.c"

#define MAX_BASE_DRIVES 32
#define MAX_RAIDS 2
#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)
#define MD_SIZE 8

struct spdk_bdev_channel {
	struct spdk_io_channel *channel;
};

struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};

/* Data structure to capture the output of IO for verification */
struct io_output {
	struct spdk_bdev_desc       *desc;
	struct spdk_io_channel      *ch;
	enum spdk_bdev_io_type      iotype;
};

/* Globals */
struct io_output *g_io_output = NULL;
uint32_t g_io_output_index;
uint32_t g_io_comp_status;
bool g_child_io_status_flag;
void *g_rpc_req;
uint32_t g_rpc_req_size;
TAILQ_HEAD(bdev, spdk_bdev);
struct bdev g_bdev_list;
uint32_t g_block_len;
uint32_t g_strip_size;
uint32_t g_max_io_size;
uint8_t g_max_base_drives;
uint8_t g_max_raids;
uint8_t g_rpc_err;
char *g_get_raids_output[MAX_RAIDS];
uint32_t g_get_raids_count;
uint8_t g_json_decode_obj_err;
uint8_t g_json_decode_obj_create;
uint8_t g_test_multi_raids;
uint64_t g_bdev_ch_io_device;
bool g_bdev_io_defer_completion;
TAILQ_HEAD(, spdk_bdev_io) g_deferred_ios = TAILQ_HEAD_INITIALIZER(g_deferred_ios);
struct spdk_thread *g_app_thread;
struct spdk_thread *g_latest_thread;

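/*
 * Start callback of the test raid module: size the raid bdev to the
 * smallest data size among its base bdevs.
 */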
static int
ut_raid_start(struct raid_bdev *raid_bdev)
{
	uint64_t min_blockcnt = UINT64_MAX;
	struct raid_base_bdev_info *base_info;

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		min_blockcnt = spdk_min(min_blockcnt, base_info->data_size);
	}
	raid_bdev->bdev.blockcnt = min_blockcnt;

	return 0;
}

static void
ut_raid_submit_rw_request_deferred_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	raid_bdev_io_complete(raid_io, success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
}

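/*
 * Complete RW requests immediately, or, when g_bdev_io_defer_completion is
 * set, queue them on g_deferred_ios to be finished later by
 * complete_deferred_ios().
 */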
static void
ut_raid_submit_rw_request(struct raid_bdev_io *raid_io)
{
	if (g_bdev_io_defer_completion) {
		struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);

		bdev_io->internal.cb = ut_raid_submit_rw_request_deferred_cb;
		bdev_io->internal.caller_ctx = raid_io;
		TAILQ_INSERT_TAIL(&g_deferred_ios, bdev_io, internal.link);
		return;
	}
	raid_bdev_io_complete(raid_io,
			      g_child_io_status_flag ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
}

static void
ut_raid_submit_null_payload_request(struct raid_bdev_io *raid_io)
{
	raid_bdev_io_complete(raid_io,
			      g_child_io_status_flag ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
}

static void
ut_raid_complete_process_request(void *ctx)
{
	struct raid_bdev_process_request *process_req = ctx;

	raid_bdev_process_request_complete(process_req, 0);
}

static int
ut_raid_submit_process_request(struct raid_bdev_process_request *process_req,
			       struct raid_bdev_io_channel *raid_ch)
{
	struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(raid_ch));

	*(uint64_t *)raid_bdev->module_private += process_req->num_blocks;

	spdk_thread_send_msg(spdk_get_thread(), ut_raid_complete_process_request, process_req);

	return process_req->num_blocks;
}

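/*
 * Test raid module registered under the artificial RAID level 123, matching
 * the level set on requests in create_test_req().
 */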
static struct raid_bdev_module g_ut_raid_module = {
	.level = 123,
	.base_bdevs_min = 1,
	.start = ut_raid_start,
	.submit_rw_request = ut_raid_submit_rw_request,
	.submit_null_payload_request = ut_raid_submit_null_payload_request,
	.submit_process_request = ut_raid_submit_process_request,
};
RAID_MODULE_REGISTER(&g_ut_raid_module)

DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), true);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
		uint32_t state_mask));
DEFINE_STUB_V(spdk_jsonrpc_end_result, (struct spdk_jsonrpc_request *request,
					struct spdk_json_write_ctx *w));
DEFINE_STUB_V(spdk_jsonrpc_send_bool_response, (struct spdk_jsonrpc_request *request,
		bool value));
DEFINE_STUB(spdk_json_decode_string, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uint32, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_uuid, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_decode_array, int, (const struct spdk_json_val *values,
		spdk_json_decode_fn decode_func,
		void *out, size_t max_size, size_t *out_size, size_t stride), 0);
DEFINE_STUB(spdk_json_decode_bool, int, (const struct spdk_json_val *val, void *out), 0);
DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
		const char *name), 0);
DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
		uint64_t val), 0);
DEFINE_STUB(spdk_strerror, const char *, (int errnum), NULL);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB(spdk_bdev_get_memory_domains, int, (struct spdk_bdev *bdev,
		struct spdk_memory_domain **domains, int array_size), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test_bdev");
DEFINE_STUB(spdk_bdev_get_md_size, uint32_t, (const struct spdk_bdev *bdev), MD_SIZE);
DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_is_md_separate, bool, (const struct spdk_bdev *bdev), true);
DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type, (const struct spdk_bdev *bdev),
	    SPDK_DIF_DISABLE);
DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0);
DEFINE_STUB(spdk_json_write_named_uuid, int, (struct spdk_json_write_ctx *w, const char *name,
		const struct spdk_uuid *val), 0);
DEFINE_STUB_V(raid_bdev_init_superblock, (struct raid_bdev *raid_bdev));
DEFINE_STUB(raid_bdev_alloc_superblock, int, (struct raid_bdev *raid_bdev, uint32_t block_size), 0);
DEFINE_STUB_V(raid_bdev_free_superblock, (struct raid_bdev *raid_bdev));
DEFINE_STUB(spdk_bdev_readv_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt, uint64_t offset_blocks,
		uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
		struct spdk_bdev_ext_io_opts *opts), 0);

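/*
 * Hand-written stubs: unlike the DEFINE_STUB() macros above, these need
 * test-specific behavior. The superblock load stub reports -EINVAL so that
 * examine paths treat base bdevs as having no superblock.
 */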
uint32_t
spdk_bdev_get_data_block_size(const struct spdk_bdev *bdev)
{
	return g_block_len;
}

int
raid_bdev_load_base_bdev_superblock(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				    raid_bdev_load_sb_cb cb, void *cb_ctx)
{
	cb(NULL, -EINVAL, cb_ctx);

	return 0;
}

void
raid_bdev_write_superblock(struct raid_bdev *raid_bdev, raid_bdev_write_sb_cb cb, void *cb_ctx)
{
	cb(0, raid_bdev, cb_ctx);
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

struct spdk_io_channel *
spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
{
	return spdk_get_io_channel(&g_bdev_ch_io_device);
}

static int
set_test_opts(void)
{
	g_max_base_drives = MAX_BASE_DRIVES;
	g_max_raids = MAX_RAIDS;
	g_block_len = 4096;
	g_strip_size = 64;
	g_max_io_size = 1024;

	printf("Test Options\n");
	printf("blocklen = %u, strip_size = %u, max_io_size = %u, g_max_base_drives = %u, "
	       "g_max_raids = %u\n",
	       g_block_len, g_strip_size, g_max_io_size, g_max_base_drives, g_max_raids);

	return 0;
}

/* Set globals before every test run */
static void
set_globals(void)
{
	uint32_t max_splits;

	if (g_max_io_size < g_strip_size) {
		max_splits = 2;
	} else {
		max_splits = (g_max_io_size / g_strip_size) + 1;
	}
	if (max_splits < g_max_base_drives) {
		max_splits = g_max_base_drives;
	}

	g_io_output = calloc(max_splits, sizeof(struct io_output));
	SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
	g_io_output_index = 0;
	memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
	g_get_raids_count = 0;
	g_io_comp_status = 0;
	g_rpc_err = 0;
	g_test_multi_raids = 0;
	g_child_io_status_flag = true;
	TAILQ_INIT(&g_bdev_list);
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
	g_json_decode_obj_err = 0;
	g_json_decode_obj_create = 0;
	g_bdev_io_defer_completion = false;
}

static void
base_bdevs_cleanup(void)
{
	struct spdk_bdev *bdev;
	struct spdk_bdev *bdev_next;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
			free(bdev->name);
			TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
			free(bdev);
		}
	}
}

static void
check_and_remove_raid_bdev(struct raid_bdev *raid_bdev)
{
	struct raid_base_bdev_info *base_info;

	assert(raid_bdev != NULL);
	assert(raid_bdev->base_bdev_info != NULL);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		if (base_info->desc) {
			raid_bdev_free_base_bdev_resource(base_info);
		}
	}
	assert(raid_bdev->num_base_bdevs_discovered == 0);
	raid_bdev_cleanup_and_free(raid_bdev);
}

/* Reset globals */
static void
reset_globals(void)
{
	if (g_io_output) {
		free(g_io_output);
		g_io_output = NULL;
	}
	g_rpc_req = NULL;
	g_rpc_req_size = 0;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
		     uint64_t len)
{
	cb(bdev_io->internal.ch->channel, bdev_io, true);
}

/* Store the IO completion status in a global variable so that tests can verify it */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
}

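/*
 * Complete all IOs queued by ut_raid_submit_rw_request(), invoking each
 * stored callback with the configured child IO status.
 */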
static void
complete_deferred_ios(void)
{
	struct spdk_bdev_io *child_io, *tmp;

	TAILQ_FOREACH_SAFE(child_io, &g_deferred_ios, internal.link, tmp) {
		TAILQ_REMOVE(&g_deferred_ios, child_io, internal.link);
		child_io->internal.cb(child_io, g_child_io_status_flag, child_io->internal.caller_ctx);
	}
}

int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	output->desc = desc;
	output->ch = ch;
	output->iotype = SPDK_BDEV_IO_TYPE_RESET;

	g_io_output_index++;

	child_io = calloc(1, sizeof(struct spdk_bdev_io));
	SPDK_CU_ASSERT_FATAL(child_io != NULL);
	cb(child_io, g_child_io_status_flag, cb_arg);

	return 0;
}

void
spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
{
	CU_ASSERT(bdeverrno == 0);
	SPDK_CU_ASSERT_FATAL(bdev->internal.unregister_cb != NULL);
	bdev->internal.unregister_cb(bdev->internal.unregister_ctx, bdeverrno);
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	TAILQ_INSERT_TAIL(&g_bdev_list, bdev, internal.link);
	return 0;
}

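/* Drain all pending messages on the app thread. */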
static void
poll_app_thread(void)
{
	while (spdk_thread_poll(g_app_thread, 0, 0) > 0) {
	}
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int ret;

	SPDK_CU_ASSERT_FATAL(spdk_bdev_get_by_name(bdev->name) == bdev);
	TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);

	bdev->internal.unregister_cb = cb_fn;
	bdev->internal.unregister_ctx = cb_arg;

	ret = bdev->fn_table->destruct(bdev->ctxt);
	CU_ASSERT(ret == 1);

	poll_app_thread();
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
		   void *event_ctx, struct spdk_bdev_desc **_desc)
{
	struct spdk_bdev *bdev;

	bdev = spdk_bdev_get_by_name(bdev_name);
	if (bdev == NULL) {
		return -ENODEV;
	}

	*_desc = (void *)bdev;
	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (void *)desc;
}

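/*
 * JSON write stubs: in single-raid tests they verify the dumped values
 * against the original RPC request stored in g_rpc_req; in multi-raid tests
 * spdk_json_write_named_string() collects the raid names instead.
 */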
int
spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;

		if (strcmp(name, "strip_size_kb") == 0) {
			CU_ASSERT(req->strip_size_kb == val);
		} else if (strcmp(name, "blocklen_shift") == 0) {
			CU_ASSERT(spdk_u32log2(g_block_len) == val);
		} else if (strcmp(name, "num_base_bdevs") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		} else if (strcmp(name, "state") == 0) {
			CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
		} else if (strcmp(name, "destruct_called") == 0) {
			CU_ASSERT(val == 0);
		} else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
			CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
		}
	}
	return 0;
}

int
spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
{
	if (g_test_multi_raids) {
		if (strcmp(name, "name") == 0) {
			g_get_raids_output[g_get_raids_count] = strdup(val);
			SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
			g_get_raids_count++;
		}
	} else {
		struct rpc_bdev_raid_create *req = g_rpc_req;

		if (strcmp(name, "raid_level") == 0) {
			CU_ASSERT(strcmp(val, raid_bdev_level_to_str(req->level)) == 0);
		}
	}
	return 0;
}

int
spdk_json_write_named_bool(struct spdk_json_write_ctx *w, const char *name, bool val)
{
	if (!g_test_multi_raids) {
		struct rpc_bdev_raid_create *req = g_rpc_req;

		if (strcmp(name, "superblock") == 0) {
			CU_ASSERT(val == req->superblock_enabled);
		}
	}
	return 0;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io) {
		free(bdev_io);
	}
}

void
spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
{
	CU_ASSERT(bdev->internal.claim_type == SPDK_BDEV_CLAIM_EXCL_WRITE);
	CU_ASSERT(bdev->internal.claim.v1.module != NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_NONE;
	bdev->internal.claim.v1.module = NULL;
}

int
spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			    struct spdk_bdev_module *module)
{
	if (bdev->internal.claim_type != SPDK_BDEV_CLAIM_NONE) {
		CU_ASSERT(bdev->internal.claim.v1.module != NULL);
		return -1;
	}
	CU_ASSERT(bdev->internal.claim.v1.module == NULL);
	bdev->internal.claim_type = SPDK_BDEV_CLAIM_EXCL_WRITE;
	bdev->internal.claim.v1.module = module;
	return 0;
}

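/*
 * Decode stub: fail when g_json_decode_obj_err is set, deep-copy the create
 * request when g_json_decode_obj_create is set, otherwise byte-copy the raw
 * request of g_rpc_req_size bytes.
 */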
int
spdk_json_decode_object(const struct spdk_json_val *values,
			const struct spdk_json_object_decoder *decoders, size_t num_decoders,
			void *out)
{
	struct rpc_bdev_raid_create *req, *_out;
	size_t i;

	if (g_json_decode_obj_err) {
		return -1;
	} else if (g_json_decode_obj_create) {
		req = g_rpc_req;
		_out = out;

		_out->name = strdup(req->name);
		SPDK_CU_ASSERT_FATAL(_out->name != NULL);
		_out->strip_size_kb = req->strip_size_kb;
		_out->level = req->level;
		_out->superblock_enabled = req->superblock_enabled;
		_out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
		for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
			_out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
			SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]);
		}
	} else {
		memcpy(out, g_rpc_req, g_rpc_req_size);
	}

	return 0;
}

struct spdk_json_write_ctx *
spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
{
	return (void *)1;
}

void
spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
				 int error_code, const char *msg)
{
	g_rpc_err = 1;
}

void
spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
				     int error_code, const char *fmt, ...)
{
	g_rpc_err = 1;
}

struct spdk_bdev *
spdk_bdev_get_by_name(const char *bdev_name)
{
	struct spdk_bdev *bdev;

	if (!TAILQ_EMPTY(&g_bdev_list)) {
		TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
			if (strcmp(bdev_name, bdev->name) == 0) {
				return bdev;
			}
		}
	}

	return NULL;
}

int
spdk_bdev_quiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
		    spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_quiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			uint64_t offset, uint64_t length,
			spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

int
spdk_bdev_unquiesce_range(struct spdk_bdev *bdev, struct spdk_bdev_module *module,
			  uint64_t offset, uint64_t length,
			  spdk_bdev_quiesce_cb cb_fn, void *cb_arg)
{
	if (cb_fn) {
		cb_fn(cb_arg, 0);
	}

	return 0;
}

static void
bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->u.bdev.iovs) {
		int i;

		for (i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
			free(bdev_io->u.bdev.iovs[i].iov_base);
		}
		free(bdev_io->u.bdev.iovs);
	}

	free(bdev_io);
}

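/*
 * Build a bdev_io of the given type; when iovcnt is non-zero, allocate an
 * iovec payload of iovcnt buffers of iov_len bytes each, covering exactly
 * "blocks" blocks.
 */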
static void
_bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch,
		    struct spdk_bdev *bdev, uint64_t lba, uint64_t blocks, int16_t iotype,
		    int iovcnt, size_t iov_len)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
	int i;

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;
	bdev_io->internal.ch = channel;
	bdev_io->u.bdev.iovcnt = iovcnt;

	if (iovcnt == 0) {
		bdev_io->u.bdev.iovs = NULL;
		return;
	}

	SPDK_CU_ASSERT_FATAL(iov_len * iovcnt == blocks * g_block_len);

	bdev_io->u.bdev.iovs = calloc(iovcnt, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);

	for (i = 0; i < iovcnt; i++) {
		struct iovec *iov = &bdev_io->u.bdev.iovs[i];

		iov->iov_base = calloc(1, iov_len);
		SPDK_CU_ASSERT_FATAL(iov->iov_base != NULL);
		iov->iov_len = iov_len;
	}

	bdev_io->u.bdev.md_buf = (void *)0x10000000;
}

static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	_bdev_io_initialize(bdev_io, ch, bdev, lba, blocks, iotype, 0, 0);
}

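/*
 * Verify that a reset was fanned out to every base bdev and that the parent
 * IO completed with the expected status.
 */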
static void
verify_reset_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
		struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
{
	uint8_t index = 0;
	struct io_output *output;

	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);

	CU_ASSERT(g_io_output_index == num_base_drives);
	for (index = 0; index < g_io_output_index; index++) {
		output = &g_io_output[index];
		CU_ASSERT(ch_ctx->base_channel[index] == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[index].desc == output->desc);
		CU_ASSERT(bdev_io->type == output->iotype);
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

703 
704 static void
705 verify_raid_bdev_present(const char *name, bool presence)
706 {
707 	struct raid_bdev *pbdev;
708 	bool   pbdev_found;
709 
710 	pbdev_found = false;
711 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
712 		if (strcmp(pbdev->bdev.name, name) == 0) {
713 			pbdev_found = true;
714 			break;
715 		}
716 	}
717 	if (presence == true) {
718 		CU_ASSERT(pbdev_found == true);
719 	} else {
720 		CU_ASSERT(pbdev_found == false);
721 	}
722 }
723 
724 static void
725 verify_raid_bdev(struct rpc_bdev_raid_create *r, bool presence, uint32_t raid_state)
726 {
727 	struct raid_bdev *pbdev;
728 	struct raid_base_bdev_info *base_info;
729 	struct spdk_bdev *bdev = NULL;
730 	bool   pbdev_found;
731 	uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF;
732 
733 	pbdev_found = false;
734 	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
735 		if (strcmp(pbdev->bdev.name, r->name) == 0) {
736 			pbdev_found = true;
737 			if (presence == false) {
738 				break;
739 			}
740 			CU_ASSERT(pbdev->base_bdev_info != NULL);
741 			CU_ASSERT(pbdev->strip_size == ((r->strip_size_kb * 1024) / g_block_len));
742 			CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size_kb * 1024) /
743 					g_block_len)));
744 			CU_ASSERT((uint32_t)pbdev->state == raid_state);
745 			CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
746 			CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
747 			CU_ASSERT(pbdev->level == r->level);
748 			CU_ASSERT(pbdev->base_bdev_info != NULL);
749 			RAID_FOR_EACH_BASE_BDEV(pbdev, base_info) {
750 				CU_ASSERT(base_info->desc != NULL);
751 				bdev = spdk_bdev_desc_get_bdev(base_info->desc);
752 				CU_ASSERT(bdev != NULL);
753 				CU_ASSERT(base_info->remove_scheduled == false);
754 				CU_ASSERT((pbdev->superblock_enabled && base_info->data_offset != 0) ||
755 					  (!pbdev->superblock_enabled && base_info->data_offset == 0));
756 				CU_ASSERT(base_info->data_offset + base_info->data_size == bdev->blockcnt);
757 
758 				if (bdev && base_info->data_size < min_blockcnt) {
759 					min_blockcnt = base_info->data_size;
760 				}
761 			}
762 			CU_ASSERT(strcmp(pbdev->bdev.product_name, "Raid Volume") == 0);
763 			CU_ASSERT(pbdev->bdev.write_cache == 0);
764 			CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
765 			CU_ASSERT(pbdev->bdev.ctxt == pbdev);
766 			CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
767 			CU_ASSERT(pbdev->bdev.module == &g_raid_if);
768 			break;
769 		}
770 	}
771 	if (presence == true) {
772 		CU_ASSERT(pbdev_found == true);
773 	} else {
774 		CU_ASSERT(pbdev_found == false);
775 	}
776 }
777 
778 static void
779 verify_get_raids(struct rpc_bdev_raid_create *construct_req,
780 		 uint8_t g_max_raids,
781 		 char **g_get_raids_output, uint32_t g_get_raids_count)
782 {
783 	uint8_t i, j;
784 	bool found;
785 
786 	CU_ASSERT(g_max_raids == g_get_raids_count);
787 	if (g_max_raids == g_get_raids_count) {
788 		for (i = 0; i < g_max_raids; i++) {
789 			found = false;
790 			for (j = 0; j < g_max_raids; j++) {
791 				if (construct_req[i].name &&
792 				    strcmp(construct_req[i].name, g_get_raids_output[i]) == 0) {
793 					found = true;
794 					break;
795 				}
796 			}
797 			CU_ASSERT(found == true);
798 		}
799 	}
800 }
801 
static void
create_base_bdevs(uint32_t bbdev_start_idx)
{
	uint8_t i;
	struct spdk_bdev *base_bdev;
	char name[16];

	for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
		base_bdev = calloc(1, sizeof(struct spdk_bdev));
		SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
		base_bdev->name = strdup(name);
		spdk_uuid_generate(&base_bdev->uuid);
		SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
		base_bdev->blocklen = g_block_len;
		base_bdev->blockcnt = BLOCK_CNT;
		TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
	}
}

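/*
 * Fill a create request using the artificial level 123 of the registered
 * test raid module and base bdevs named NvmeXn1.
 */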
static void
create_test_req(struct rpc_bdev_raid_create *r, const char *raid_name,
		uint8_t bbdev_start_idx, bool create_base_bdev, bool superblock_enabled)
{
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = bbdev_start_idx;

	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);
	r->strip_size_kb = (g_strip_size * g_block_len) / 1024;
	r->level = 123;
	r->superblock_enabled = superblock_enabled;
	r->base_bdevs.num_base_bdevs = g_max_base_drives;
	for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
		snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
		r->base_bdevs.base_bdevs[i] = strdup(name);
		SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
	}
	if (create_base_bdev == true) {
		create_base_bdevs(bbdev_start_idx);
	}
	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
}

static void
create_raid_bdev_create_req(struct rpc_bdev_raid_create *r, const char *raid_name,
			    uint8_t bbdev_start_idx, bool create_base_bdev,
			    uint8_t json_decode_obj_err, bool superblock_enabled)
{
	create_test_req(r, raid_name, bbdev_start_idx, create_base_bdev, superblock_enabled);

	g_rpc_err = 0;
	g_json_decode_obj_create = 1;
	g_json_decode_obj_err = json_decode_obj_err;
	g_test_multi_raids = 0;
}

static void
free_test_req(struct rpc_bdev_raid_create *r)
{
	uint8_t i;

	free(r->name);
	for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
		free(r->base_bdevs.base_bdevs[i]);
	}
}

static void
create_raid_bdev_delete_req(struct rpc_bdev_raid_delete *r, const char *raid_name,
			    uint8_t json_decode_obj_err)
{
	r->name = strdup(raid_name);
	SPDK_CU_ASSERT_FATAL(r->name != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_test_multi_raids = 0;
}

static void
create_get_raids_req(struct rpc_bdev_raid_get_bdevs *r, const char *category,
		     uint8_t json_decode_obj_err)
{
	r->category = strdup(category);
	SPDK_CU_ASSERT_FATAL(r->category != NULL);

	g_rpc_req = r;
	g_rpc_req_size = sizeof(*r);
	g_rpc_err = 0;
	g_json_decode_obj_create = 0;
	g_json_decode_obj_err = json_decode_obj_err;
	g_test_multi_raids = 1;
	g_get_raids_count = 0;
}

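/* Create a raid bdev through the RPC handler and delete it again. */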
static void
test_create_raid(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

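/*
 * Exercise rpc_bdev_raid_create error paths: invalid level, JSON decode
 * failure, invalid strip size, duplicate raid names and base bdevs, plus
 * creation with a base bdev that does not exist yet.
 */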
static void
test_create_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *raid_bdev;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	req.level = INVALID_RAID_LEVEL;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 1, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	req.strip_size_kb = 1231;
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid1", false);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid1", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);

	create_raid_bdev_create_req(&req, "raid2", 0, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", false);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, true, 0, false);
	free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
	req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
	SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	raid_bdev = raid_bdev_find_by_name("raid2");
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	check_and_remove_raid_bdev(raid_bdev);

	create_raid_bdev_create_req(&req, "raid2", g_max_base_drives, false, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	free_test_req(&req);
	verify_raid_bdev_present("raid2", true);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_delete_raid_invalid_args(void)
{
	struct rpc_bdev_raid_create construct_req;
	struct rpc_bdev_raid_delete destroy_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&construct_req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&construct_req);

	create_raid_bdev_delete_req(&destroy_req, "raid2", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 1);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(destroy_req.name);
	verify_raid_bdev_present("raid1", true);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_io_channel(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	free_test_req(&req);

	spdk_put_io_channel(ch);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

/* Test reset IO */
static void
test_reset_io(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ch_ctx = spdk_io_channel_get_ctx(ch);
	SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);

	g_child_io_status_flag = true;

	CU_ASSERT(raid_bdev_io_type_supported(pbdev, SPDK_BDEV_IO_TYPE_RESET) == true);

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, 1, SPDK_BDEV_IO_TYPE_RESET);
	memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
	g_io_output_index = 0;
	raid_bdev_submit_request(ch, bdev_io);
	verify_reset_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
			true);
	bdev_io_cleanup(bdev_io);

	free_test_req(&req);
	spdk_put_io_channel(ch);
	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

/* Create multiple raids, destroy raids without IO, get_raids related tests */
static void
test_multi_raid(void)
{
	struct rpc_bdev_raid_create *construct_req;
	struct rpc_bdev_raid_delete destroy_req;
	struct rpc_bdev_raid_get_bdevs get_raids_req;
	uint8_t i;
	char name[16];
	uint8_t bbdev_idx = 0;

	set_globals();
	construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_bdev_raid_create));
	SPDK_CU_ASSERT_FATAL(construct_req != NULL);
	CU_ASSERT(raid_bdev_init() == 0);
	for (i = 0; i < g_max_raids; i++) {
		snprintf(name, 16, "%s%u", "raid", i);
		verify_raid_bdev_present(name, false);
		create_raid_bdev_create_req(&construct_req[i], name, bbdev_idx, true, 0, false);
		bbdev_idx += g_max_base_drives;
		rpc_bdev_raid_create(NULL, NULL);
		CU_ASSERT(g_rpc_err == 0);
		verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
	}

	create_get_raids_req(&get_raids_req, "all", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	create_get_raids_req(&get_raids_req, "online", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	create_get_raids_req(&get_raids_req, "configuring", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "offline", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "invalid_category", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "all", 1);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 1);
	free(get_raids_req.category);
	CU_ASSERT(g_get_raids_count == 0);

	create_get_raids_req(&get_raids_req, "all", 0);
	rpc_bdev_raid_get_bdevs(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	CU_ASSERT(g_get_raids_count == g_max_raids);
	for (i = 0; i < g_get_raids_count; i++) {
		free(g_get_raids_output[i]);
	}

	for (i = 0; i < g_max_raids; i++) {
		SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
		snprintf(name, 16, "%s", construct_req[i].name);
		create_raid_bdev_delete_req(&destroy_req, name, 0);
		rpc_bdev_raid_delete(NULL, NULL);
		CU_ASSERT(g_rpc_err == 0);
		verify_raid_bdev_present(name, false);
	}
	raid_bdev_exit();
	for (i = 0; i < g_max_raids; i++) {
		free_test_req(&construct_req[i]);
	}
	free(construct_req);
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_io_type_supported(void)
{
	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
	CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
}

static void
test_raid_json_dump_info(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);

	free_test_req(&req);

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static void
test_context_size(void)
{
	CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
}

static void
test_raid_level_conversions(void)
{
	const char *raid_str;

	CU_ASSERT(raid_bdev_str_to_level("abcd123") == INVALID_RAID_LEVEL);
	CU_ASSERT(raid_bdev_str_to_level("0") == RAID0);
	CU_ASSERT(raid_bdev_str_to_level("raid0") == RAID0);
	CU_ASSERT(raid_bdev_str_to_level("RAID0") == RAID0);

	raid_str = raid_bdev_level_to_str(INVALID_RAID_LEVEL);
	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
	raid_str = raid_bdev_level_to_str(1234);
	CU_ASSERT(raid_str != NULL && strlen(raid_str) == 0);
	raid_str = raid_bdev_level_to_str(RAID0);
	CU_ASSERT(raid_str != NULL && strcmp(raid_str, "raid0") == 0);
}

static void
test_create_raid_superblock(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete delete_req;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, true);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	create_raid_bdev_delete_req(&delete_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

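/*
 * Run a background process (rebuild) over the whole raid bdev on a dedicated
 * process thread and verify that every block was handed to the module's
 * submit_process_request callback.
 */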
static void
test_raid_process(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_bdev *base_bdev;
	struct spdk_thread *process_thread;
	uint64_t num_blocks_processed = 0;

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	verify_raid_bdev_present("raid1", false);
	TAILQ_FOREACH(base_bdev, &g_bdev_list, internal.link) {
		base_bdev->blockcnt = 128;
	}
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
	free_test_req(&req);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);

	pbdev->module_private = &num_blocks_processed;
	pbdev->min_base_bdevs_operational = 0;

	CU_ASSERT(raid_bdev_start_rebuild(&pbdev->base_bdev_info[0]) == 0);
	poll_app_thread();

	SPDK_CU_ASSERT_FATAL(pbdev->process != NULL);

	process_thread = g_latest_thread;
	spdk_thread_poll(process_thread, 0, 0);
	SPDK_CU_ASSERT_FATAL(pbdev->process->thread == process_thread);

	while (spdk_thread_poll(process_thread, 0, 0) > 0) {
		poll_app_thread();
	}

	CU_ASSERT(pbdev->process == NULL);
	CU_ASSERT(num_blocks_processed == pbdev->bdev.blockcnt);

	poll_app_thread();

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

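/*
 * Verify splitting of write IOs that span the process offset while a
 * background process is active: the two parts on either side of the offset
 * are submitted separately and the original iovecs are restored once both
 * halves complete.
 */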
static void
test_raid_io_split(void)
{
	struct rpc_bdev_raid_create req;
	struct rpc_bdev_raid_delete destroy_req;
	struct raid_bdev *pbdev;
	struct spdk_io_channel *ch;
	struct raid_bdev_io_channel *raid_ch;
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev_io *raid_io;
	uint64_t split_offset;
	struct iovec iovs_orig[4];
	struct raid_bdev_process process = { };

	set_globals();
	CU_ASSERT(raid_bdev_init() == 0);

	verify_raid_bdev_present("raid1", false);
	create_raid_bdev_create_req(&req, "raid1", 0, true, 0, false);
	rpc_bdev_raid_create(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);

	TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, "raid1") == 0) {
			break;
		}
	}
	CU_ASSERT(pbdev != NULL);
	pbdev->bdev.md_len = 8;

	process.raid_bdev = pbdev;
	process.target = &pbdev->base_bdev_info[0];
	pbdev->process = &process;
	ch = spdk_get_io_channel(pbdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	raid_ch = spdk_io_channel_get_ctx(ch);
	g_bdev_io_defer_completion = true;

	/* test split of bdev_io with 1 iovec */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	_bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE, 1,
			    g_strip_size * g_block_len);
	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);

	split_offset = 1;
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(raid_io->iovs == raid_io->split.iov);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base + split_offset * g_block_len);
	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len - split_offset * g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
	CU_ASSERT(raid_io->iovs[0].iov_len == split_offset * g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig->iov_base);
	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig->iov_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);

	bdev_io_cleanup(bdev_io);

	/* test split of bdev_io with 4 iovecs */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	_bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_WRITE,
			    4, g_strip_size / 4 * g_block_len);
	memcpy(iovs_orig, bdev_io->u.bdev.iovs, sizeof(*iovs_orig) * bdev_io->u.bdev.iovcnt);

	split_offset = 1; /* split inside the first iovec */
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[0]);
	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[0]);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base + g_block_len);
	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[0].iov_len - g_block_len);
	CU_ASSERT(memcmp(raid_io->iovs + 1, iovs_orig + 1, sizeof(*iovs_orig) * 3) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[0].iov_base);
	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);

	split_offset = g_strip_size / 2; /* split exactly between the second and third iovecs */
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 2);
	CU_ASSERT(raid_io->split.iov == NULL);
	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig + 2, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 2);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);

	split_offset = g_strip_size / 2 + 1; /* split inside the third iovec */
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 2);
	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[2]);
	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[2]);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[2].iov_base + g_block_len);
	CU_ASSERT(raid_io->iovs[0].iov_len == iovs_orig[2].iov_len - g_block_len);
	CU_ASSERT(raid_io->iovs[1].iov_base == iovs_orig[3].iov_base);
	CU_ASSERT(raid_io->iovs[1].iov_len == iovs_orig[3].iov_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 3);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 2) == 0);
	CU_ASSERT(raid_io->iovs[2].iov_base == iovs_orig[2].iov_base);
	CU_ASSERT(raid_io->iovs[2].iov_len == g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);

	split_offset = g_strip_size - 1; /* split inside the last iovec */
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);
	CU_ASSERT(raid_io->iovcnt == 1);
	CU_ASSERT(raid_io->split.iov == &bdev_io->u.bdev.iovs[3]);
	CU_ASSERT(raid_io->iovs == &bdev_io->u.bdev.iovs[3]);
	CU_ASSERT(raid_io->iovs[0].iov_base == iovs_orig[3].iov_base + iovs_orig[3].iov_len - g_block_len);
	CU_ASSERT(raid_io->iovs[0].iov_len == g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf + split_offset * pbdev->bdev.md_len);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * 3) == 0);
	CU_ASSERT(raid_io->iovs[3].iov_base == iovs_orig[3].iov_base);
	CU_ASSERT(raid_io->iovs[3].iov_len == iovs_orig[3].iov_len - g_block_len);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}
	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);
	CU_ASSERT(raid_io->iovcnt == 4);
	CU_ASSERT(raid_io->iovs == bdev_io->u.bdev.iovs);
	CU_ASSERT(memcmp(raid_io->iovs, iovs_orig, sizeof(*iovs_orig) * raid_io->iovcnt) == 0);
	if (spdk_bdev_get_dif_type(&pbdev->bdev) != SPDK_DIF_DISABLE &&
	    !spdk_bdev_is_md_interleaved(&pbdev->bdev)) {
		CU_ASSERT(raid_io->md_buf == bdev_io->u.bdev.md_buf);
	}

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);

	bdev_io_cleanup(bdev_io);

	spdk_put_io_channel(ch);
	free_test_req(&req);
	pbdev->process = NULL;

	create_raid_bdev_delete_req(&destroy_req, "raid1", 0);
	rpc_bdev_raid_delete(NULL, NULL);
	CU_ASSERT(g_rpc_err == 0);
	verify_raid_bdev_present("raid1", false);

	raid_bdev_exit();
	base_bdevs_cleanup();
	reset_globals();
}

static int
test_new_thread_fn(struct spdk_thread *thread)
{
	g_latest_thread = thread;

	return 0;
}

static int
test_bdev_ioch_create(void *io_device, void *ctx_buf)
{
	return 0;
}

static void
test_bdev_ioch_destroy(void *io_device, void *ctx_buf)
{
}

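/*
 * The test harness creates a real SPDK app thread and registers a dummy IO
 * device so that base bdev channels can be created; threads are polled
 * manually from the tests instead of by a reactor.
 */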
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("raid", set_test_opts, NULL);
	CU_ADD_TEST(suite, test_create_raid);
	CU_ADD_TEST(suite, test_create_raid_superblock);
	CU_ADD_TEST(suite, test_delete_raid);
	CU_ADD_TEST(suite, test_create_raid_invalid_args);
	CU_ADD_TEST(suite, test_delete_raid_invalid_args);
	CU_ADD_TEST(suite, test_io_channel);
	CU_ADD_TEST(suite, test_reset_io);
	CU_ADD_TEST(suite, test_multi_raid);
	CU_ADD_TEST(suite, test_io_type_supported);
	CU_ADD_TEST(suite, test_raid_json_dump_info);
	CU_ADD_TEST(suite, test_context_size);
	CU_ADD_TEST(suite, test_raid_level_conversions);
	CU_ADD_TEST(suite, test_raid_io_split);
	CU_ADD_TEST(suite, test_raid_process);

	spdk_thread_lib_init(test_new_thread_fn, 0);
	g_app_thread = spdk_thread_create("app_thread", NULL);
	spdk_set_thread(g_app_thread);
	spdk_io_device_register(&g_bdev_ch_io_device, test_bdev_ioch_create, test_bdev_ioch_destroy, 0,
				NULL);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	spdk_io_device_unregister(&g_bdev_ch_io_device, NULL);
	spdk_thread_exit(g_app_thread);
	spdk_thread_poll(g_app_thread, 0, 0);
	spdk_thread_destroy(g_app_thread);
	spdk_thread_lib_fini();

	return num_failures;
}