/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk/xor.h"

#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid5f.c"
#include "../common.c"

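/* Global test state: a dummy I/O device pointer used to register the stubbed
 * accel channel, and a flag that switches the tests into degraded mode
 * (one base bdev's channel removed).
 */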
static void *g_accel_p = (void *)0xdeadbeaf;
static bool g_test_degraded;

DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

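/* Stubbed accel framework entry point - returns a channel for the dummy
 * g_accel_p I/O device registered in init_accel().
 */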
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

struct xor_ctx {
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;
};

static void
finish_xor(void *_ctx)
{
	struct xor_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, 0);

	free(ctx);
}

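/* Stub for the accel XOR operation. The XOR is computed synchronously with
 * spdk_xor_gen(), but the completion callback is deferred via a thread
 * message to mimic the asynchronous completion of a real accel operation.
 */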
int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct xor_ctx *ctx;

	ctx = malloc(sizeof(*ctx));
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	SPDK_CU_ASSERT_FATAL(spdk_xor_gen(dst, sources, nsrcs, nbytes) == 0);

	spdk_thread_send_msg(spdk_get_thread(), finish_xor, ctx);

	return 0;
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

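/* Build the test parameter matrix: every combination of base bdev count,
 * block count, block length, strip size and metadata length, skipping
 * combinations where the strip size would be zero or exceed the base bdev
 * block count.
 */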
static int
test_suite_init(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint32_t md_len_values[] = { 0, 64 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	uint32_t *md_len;
	struct raid_params params;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values) *
		       SPDK_COUNTOF(md_len_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					ARRAY_FOR_EACH(md_len_values, md_len) {
						params.num_base_bdevs = *num_base_bdevs;
						params.base_bdev_blockcnt = *base_bdev_blockcnt;
						params.base_bdev_blocklen = *base_bdev_blocklen;
						params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
						params.md_len = *md_len;
						if (params.strip_size == 0 ||
						    params.strip_size > *base_bdev_blockcnt) {
							continue;
						}
						raid_test_params_add(&params);
					}
				}
			}
		}
	}

	init_accel();

	return 0;
}

static int
test_suite_cleanup(void)
{
	fini_accel();
	raid_test_params_free();
	return 0;
}

static void
test_setup(void)
{
	g_test_degraded = false;
}

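/* Helpers to create and tear down a raid5f bdev from a set of test parameters. */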
static struct raid5f_info *
create_raid5f(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid5f_module);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

static void
test_raid5f_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		SPDK_CU_ASSERT_FATAL(r5f_info != NULL);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

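/* Type of error to inject into a base bdev: fail the submission call itself,
 * fail the completion, or return -ENOMEM to exercise the I/O wait/retry path.
 */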
enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t stripe_index;
	uint64_t offset_blocks;
	uint64_t stripe_offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	void *src_md_buf;
	void *dest_md_buf;
	size_t buf_size;
	size_t buf_md_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	void *parity_md_buf;
	void *reference_md_parity;
	size_t parity_md_buf_size;
	void *degraded_buf;
	void *degraded_md_buf;
	enum spdk_bdev_io_status status;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

struct test_raid_bdev_io {
	struct raid_bdev_io raid_io;
	struct raid_io_info *io_info;
	void *buf;
	void *buf_md;
};

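/* Stub: instead of registering with a real bdev, record the wait entry so
 * process_io_completions() can replay it after an injected -ENOMEM.
 */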
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct raid_io_info *io_info = test_raid_bdev_io->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

static void
raid_test_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);

	test_raid_bdev_io->io_info->status = status;

	free(raid_io->iovs);
	free(test_raid_bdev_io);
}

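/* Allocate a raid I/O request for the given test case. The payload is split
 * into 7 iovecs (any division remainder is folded into the last one),
 * presumably to exercise the iovec mapping across chunk boundaries.
 */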
static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct iovec *iovs;
	int iovcnt;
	void *md_buf;
	size_t iov_len, remaining;
	struct iovec *iov;
	void *buf;
	int i;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = io_info->src_buf;
		test_raid_bdev_io->buf_md = io_info->src_md_buf;
		buf = io_info->dest_buf;
		md_buf = io_info->dest_md_buf;
	} else {
		test_raid_bdev_io->buf = io_info->dest_buf;
		test_raid_bdev_io->buf_md = io_info->dest_md_buf;
		buf = io_info->src_buf;
		md_buf = io_info->src_md_buf;
	}

	iovcnt = 7;
	iovs = calloc(iovcnt, sizeof(*iovs));
	SPDK_CU_ASSERT_FATAL(iovs != NULL);

	remaining = io_info->num_blocks * blocklen;
	iov_len = remaining / iovcnt;

	for (i = 0; i < iovcnt; i++) {
		iov = &iovs[i];
		iov->iov_base = buf;
		iov->iov_len = iov_len;
		buf += iov_len;
		remaining -= iov_len;
	}
	iov->iov_len += remaining;

	raid_io = &test_raid_bdev_io->raid_io;

	raid_test_bdev_io_init(raid_io, raid_bdev, io_info->raid_ch, io_info->io_type,
			       io_info->offset_blocks, io_info->num_blocks, iovs, iovcnt, md_buf);

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

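/* Fake base bdev submission: honor any injected submission or -ENOMEM error,
 * otherwise queue a bdev_io for later completion by process_io_completions().
 */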
static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

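/* Complete all queued bdev I/Os, failing those targeted by an injected
 * completion error. After an injected -ENOMEM, the error is cleared (with an
 * optional callback allowed to inject a follow-up error), the queued wait
 * entries are replayed, and completions are processed again.
 */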
static void
process_io_completions(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	bool success;

	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);

		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
		    io_info->error.bdev == bdev_io->bdev) {
			success = false;
		} else {
			success = true;
		}

		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
	}

	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
		struct spdk_bdev *enomem_bdev = io_info->error.bdev;

		io_info->error.type = TEST_BDEV_ERROR_NONE;

		if (io_info->error.on_enomem_cb != NULL) {
			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
		}

		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
			waitq_entry->cb_fn(waitq_entry->cb_arg);
		}

		process_io_completions(io_info);
	} else {
		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
	}
}

#define DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset) \
	(((data_offset) >> (raid_bdev)->blocklen_shift) * (raid_bdev)->bdev.md_len)

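/* Write stub: instead of writing to a base bdev, copy the chunk's data (or
 * parity) into the test buffers so the result can later be compared against
 * the reference, then queue the I/O for completion.
 */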
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	uint64_t data_offset;
	struct iovec dest;
	void *dest_md_buf;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		dest.iov_base = io_info->parity_buf;
		if (md_buf != NULL) {
			dest_md_buf = io_info->parity_md_buf;
		}
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		data_offset = data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		dest.iov_base = test_raid_bdev_io->buf + data_offset;
		if (md_buf != NULL) {
			data_offset = DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset);
			dest_md_buf = test_raid_bdev_io->buf_md + data_offset;
		}
	}
	dest.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(iov, iovcnt, &dest, 1);
	if (md_buf != NULL) {
		memcpy(dest_md_buf, md_buf, num_blocks * raid_bdev->bdev.md_len);
	}

submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

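/* Serve a chunk read issued during degraded-mode reconstruction from the
 * prepared degraded stripe image and reference parity buffers.
 */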
static int
spdk_bdev_readv_blocks_degraded(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	void *buf, *buf_md;
	struct iovec src;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		buf = io_info->reference_parity;
		buf_md = io_info->reference_md_parity;
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		buf = io_info->degraded_buf +
		      data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		buf_md = io_info->degraded_md_buf +
			 data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.md_len;
	}

	buf += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.blocklen;
	buf_md += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.md_len;

	src.iov_base = buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					       cb_arg);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			    uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					       num_blocks, cb, cb_arg);
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md_buf,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct iovec src;

	if (cb == raid5f_chunk_complete_bdev_io) {
		return spdk_bdev_readv_blocks_degraded(desc, ch, iov, iovcnt, md_buf, offset_blocks,
						       num_blocks, cb, cb_arg);
	}

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);

	src.iov_base = test_raid_bdev_io->buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, test_raid_bdev_io->buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					      cb_arg);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			   uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					      num_blocks, cb, cb_arg);
}

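/* Byte-wise XOR of buffer b into buffer a - used to build reference parity. */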
static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	poll_threads();

	process_io_completions(io_info);

	if (g_test_degraded) {
		struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
		uint8_t p_idx;
		uint8_t i;
		off_t offset;
		uint32_t strip_len;

		for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
			if (!raid_bdev_channel_get_base_channel(io_info->raid_ch, i)) {
				break;
			}
		}

		SPDK_CU_ASSERT_FATAL(i != raid_bdev->num_base_bdevs);

		p_idx = raid5f_stripe_parity_chunk_index(raid_bdev, io_info->stripe_index);

		if (i == p_idx) {
			return;
		}

		if (i >= p_idx) {
			i--;
		}

		strip_len = raid_bdev->strip_size_kb * 1024;
		offset = i * strip_len;

		memcpy(io_info->dest_buf + offset, io_info->src_buf + offset, strip_len);
		if (io_info->dest_md_buf) {
			strip_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;
			offset = i * strip_len;
			memcpy(io_info->dest_md_buf + offset, io_info->src_md_buf + offset, strip_len);
		}
	}

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		if (io_info->parity_buf) {
			CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
					 io_info->parity_buf_size) == 0);
		}
		if (io_info->parity_md_buf) {
			CU_ASSERT(memcmp(io_info->parity_md_buf, io_info->reference_md_parity,
					 io_info->parity_md_buf_size) == 0);
		}
	}
}

static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks <= io_info->r5f_info->raid_bdev->strip_size);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);

	if (g_test_degraded) {
		/* for the reconstruct read xor callback */
		poll_threads();
	}
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	free(io_info->src_buf);
	free(io_info->dest_buf);
	free(io_info->src_md_buf);
	free(io_info->dest_md_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
	free(io_info->parity_md_buf);
	free(io_info->reference_md_parity);
	free(io_info->degraded_buf);
	free(io_info->degraded_md_buf);
}

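/* Initialize a test I/O descriptor: allocate source/destination data and
 * metadata buffers and fill the source with a recognizable pattern.
 */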
static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t stripe_index, uint64_t stripe_offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	void *src_md_buf, *dest_md_buf;
	size_t buf_size = num_blocks * blocklen;
	size_t buf_md_size = num_blocks * raid_bdev->bdev.md_len;
	uint64_t block;
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(stripe_offset_blocks < r5f_info->stripe_blocks);

	memset(io_info, 0, sizeof(*io_info));

	if (buf_size) {
		src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_buf != NULL);

		dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

		memset(src_buf, 0xff, buf_size);
		for (block = 0; block < num_blocks; block++) {
			*((uint64_t *)(src_buf + block * blocklen)) = block;
		}
	} else {
		src_buf = NULL;
		dest_buf = NULL;
	}

	if (buf_md_size) {
		src_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_md_buf != NULL);

		dest_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_md_buf != NULL);

		memset(src_md_buf, 0xff, buf_md_size);
		for (i = 0; i < buf_md_size; i++) {
			*((uint8_t *)(src_md_buf + i)) = (uint8_t)i;
		}
	} else {
		src_md_buf = NULL;
		dest_md_buf = NULL;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->stripe_index = stripe_index;
	io_info->offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	io_info->stripe_offset_blocks = stripe_offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->src_md_buf = src_md_buf;
	io_info->dest_md_buf = dest_md_buf;
	io_info->buf_size = buf_size;
	io_info->buf_md_size = buf_md_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

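/* Compute the expected (reference) parity by XOR-ing the data chunks of the
 * given source buffer, and allocate a buffer to capture the parity that the
 * raid5f module actually writes.
 */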
static void
io_info_setup_parity(struct raid_io_info *io_info, void *src, void *src_md)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	unsigned i;

	io_info->parity_buf_size = strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		xor_block(io_info->reference_parity, src, strip_len);
		src += strip_len;
	}

	if (src_md) {
		size_t strip_md_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;

		io_info->parity_md_buf_size = strip_md_len;
		io_info->parity_md_buf = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->parity_md_buf != NULL);

		io_info->reference_md_parity = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->reference_md_parity != NULL);

		for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
			xor_block(io_info->reference_md_parity, src_md, strip_md_len);
			src_md += strip_md_len;
		}
	}
}

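/* Prepare buffers for degraded-mode reads: a full stripe image containing the
 * source data plus the matching reference parity, after which the region under
 * test is overwritten with a garbage pattern so that it can only be obtained
 * by reconstruction from the surviving chunks.
 */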
static void
io_info_setup_degraded(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	uint32_t md_len = raid_bdev->bdev.md_len;
	size_t stripe_len = r5f_info->stripe_blocks * blocklen;
	size_t stripe_md_len = r5f_info->stripe_blocks * md_len;

	io_info->degraded_buf = malloc(stripe_len);
	SPDK_CU_ASSERT_FATAL(io_info->degraded_buf != NULL);

	memset(io_info->degraded_buf, 0xab, stripe_len);

	memcpy(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       io_info->src_buf, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		io_info->degraded_md_buf = malloc(stripe_md_len);
		SPDK_CU_ASSERT_FATAL(io_info->degraded_md_buf != NULL);

		memset(io_info->degraded_md_buf, 0xab, stripe_md_len);

		memcpy(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       io_info->src_md_buf, io_info->num_blocks * md_len);
	}

	io_info_setup_parity(io_info, io_info->degraded_buf, io_info->degraded_md_buf);

	memset(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       0xcd, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		memset(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       0xcd, io_info->num_blocks * md_len);
	}
}

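/* Run a single read or write request against the raid5f bdev and verify that
 * the data (and metadata, if present) read back matches what was written.
 */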
static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	struct raid_io_info io_info;

	init_io_info(&io_info, r5f_info, raid_ch, io_type, stripe_index, stripe_offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (g_test_degraded) {
			io_info_setup_degraded(&io_info);
		}
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info, io_info.src_buf, io_info.src_md_buf);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);
	if (io_info.buf_md_size) {
		CU_ASSERT(memcmp(io_info.src_md_buf, io_info.dest_md_buf, io_info.buf_md_size) == 0);
	}

	deinit_io_info(&io_info);
}

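/* Run a test function against every raid5f configuration in the parameter
 * matrix. In degraded mode the first base bdev's channel is set to NULL to
 * simulate a missing base bdev.
 */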
static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			   struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel *raid_ch;

		r5f_info = create_raid5f(params);
		raid_ch = raid_test_create_io_channel(r5f_info->raid_bdev);

		if (g_test_degraded) {
			raid_ch->_base_channels[0] = NULL;
		}

		test_fn(r5f_info->raid_bdev, raid_ch);

		raid_test_destroy_io_channel(raid_ch);
		delete_raid5f(r5f_info);
	}
}

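/* Iterate over up to one stripe per base bdev, capped by the total number of
 * stripes - presumably enough to cover every parity chunk rotation position.
 */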
#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	uint64_t stripe_index;
	unsigned int i;

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		uint64_t stripe_offset = i * strip_size;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, 1);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, strip_size);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + strip_size - 1, 1);
			if (strip_size <= 2) {
				continue;
			}
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + 1, strip_size - 2);
		}
	}
}

static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

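/* Verify that a write request's iovecs are mapped onto the stripe's data
 * chunks correctly, including iovecs that span chunk boundaries.
 */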
static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_io_channel *r5ch = raid_bdev_channel_get_module_ctx(raid_ch);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_bdev_io raid_io = {};
	struct stripe_request *stripe_req;
	struct chunk *chunk;
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	raid_io.raid_bdev = raid_bdev;
	raid_io.iovs = iovs;
	raid_io.iovcnt = iovcnt;

	stripe_req = raid5f_stripe_request_alloc(r5ch, STRIPE_REQ_WRITE);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = &raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	raid5f_stripe_request_free(stripe_req);
}

static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}

static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

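/* Inject each error type into every base bdev in turn and check that only the
 * -ENOMEM case completes successfully (via the I/O wait/retry path).
 */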
static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->desc->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

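/* Context for injecting a second error on another base bdev while an -ENOMEM
 * retry is pending; the write is then expected to fail.
 */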
struct chunk_write_error_with_enomem_ctx {
	enum test_bdev_error_type error_type;
	struct spdk_bdev *bdev;
};

static void
chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
{
	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;

	io_info->error.type = ctx->error_type;
	io_info->error.bdev = ctx->bdev;
}

static void
__test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;
	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			struct raid_base_bdev_info *base_bdev_info_last =
					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];

			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				if (base_bdev_info == base_bdev_info_last) {
					continue;
				}

				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
				io_info.error.bdev = base_bdev_info->desc->bdev;
				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
				on_enomem_cb_ctx.error_type = error_type;
				on_enomem_cb_ctx.bdev = base_bdev_info_last->desc->bdev;

				test_raid5f_write_request(&io_info);

				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error_with_enomem(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
}

static void
test_raid5f_submit_full_stripe_write_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
test_raid5f_submit_read_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite_with_setup_and_teardown("raid5f", test_suite_init, test_suite_cleanup,
			test_setup, NULL);
	CU_ADD_TEST(suite, test_raid5f_start);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request_degraded);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request_degraded);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}