/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk/xor.h"

#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid5f.c"
#include "../common.c"

static void *g_accel_p = (void *)0xdeadbeaf;
static bool g_test_degraded;

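/*
 * Stub out raid framework and accel symbols that raid5f.c references but that
 * these tests do not exercise directly.
 */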
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB_V(raid_bdev_process_request_complete, (struct raid_bdev_process_request *process_req,
		int status));
DEFINE_STUB_V(raid_bdev_io_init, (struct raid_bdev_io *raid_io,
				  struct raid_bdev_io_channel *raid_ch,
				  enum spdk_bdev_io_type type, uint64_t offset_blocks,
				  uint64_t num_blocks, struct iovec *iovs, int iovcnt, void *md_buf,
				  struct spdk_memory_domain *memory_domain, void *memory_domain_ctx));
DEFINE_STUB(raid_bdev_remap_dix_reftag, int, (void *md_buf, uint64_t num_blocks,
		struct spdk_bdev *bdev, uint32_t remapped_offset), -1);

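/*
 * Mock of the accel framework: XOR requests are computed synchronously with
 * spdk_xor_gen() and their completions are delivered through a thread
 * message, preserving the asynchronous completion semantics the module
 * expects.
 */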
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

struct xor_ctx {
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;
};

static void
finish_xor(void *_ctx)
{
	struct xor_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, 0);

	free(ctx);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct xor_ctx *ctx;

	ctx = malloc(sizeof(*ctx));
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	SPDK_CU_ASSERT_FATAL(spdk_xor_gen(dst, sources, nsrcs, nbytes) == 0);

	spdk_thread_send_msg(spdk_get_thread(), finish_xor, ctx);

	return 0;
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

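/*
 * Generate the test parameter matrix: the cartesian product of all array
 * counts, block counts, block lengths, strip sizes and metadata types,
 * skipping combinations where the strip size is zero or exceeds the base
 * bdev's block count.
 */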
static int
test_suite_init(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	enum raid_params_md_type md_type_values[] = { RAID_PARAMS_MD_NONE, RAID_PARAMS_MD_SEPARATE, RAID_PARAMS_MD_INTERLEAVED };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	enum raid_params_md_type *md_type;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values) *
		       SPDK_COUNTOF(md_type_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					ARRAY_FOR_EACH(md_type_values, md_type) {
						struct raid_params params = {
							.num_base_bdevs = *num_base_bdevs,
							.base_bdev_blockcnt = *base_bdev_blockcnt,
							.base_bdev_blocklen = *base_bdev_blocklen,
							.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen,
							.md_type = *md_type,
						};
						if (params.strip_size == 0 ||
						    params.strip_size > params.base_bdev_blockcnt) {
							continue;
						}
						raid_test_params_add(&params);
					}
				}
			}
		}
	}

	init_accel();

	return 0;
}

static int
test_suite_cleanup(void)
{
	fini_accel();
	raid_test_params_free();
	return 0;
}

static void
test_setup(void)
{
	g_test_degraded = false;
}

static struct raid5f_info *
create_raid5f(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid5f_module);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

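/* Verify the geometry that raid5f_start() derives from each parameter set. */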
static void
test_raid5f_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		SPDK_CU_ASSERT_FATAL(r5f_info != NULL);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

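/* Error injection modes for the mocked base bdev I/O path. */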
enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

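/*
 * State of a single test I/O: source/destination data and metadata buffers,
 * expected parity, degraded-mode stripe contents, completion status, and
 * queues of pending mock base bdev I/Os and ENOMEM wait entries.
 */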
struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t stripe_index;
	uint64_t offset_blocks;
	uint64_t stripe_offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	void *src_md_buf;
	void *dest_md_buf;
	size_t buf_size;
	size_t buf_md_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	void *parity_md_buf;
	void *reference_md_parity;
	size_t parity_md_buf_size;
	void *degraded_buf;
	void *degraded_md_buf;
	enum spdk_bdev_io_status status;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

struct test_raid_bdev_io {
	struct raid_bdev_io raid_io;
	struct raid_io_info *io_info;
	void *buf;
	void *buf_md;
};

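/* Park ENOMEM retries on the test's wait queue instead of a real bdev's. */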
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct raid_io_info *io_info = test_raid_bdev_io->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

void
raid_test_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);

	test_raid_bdev_io->io_info->status = status;

	free(raid_io->iovs);
	free(test_raid_bdev_io);
}

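/*
 * Allocate a raid I/O for the test, splitting the data buffer evenly into
 * 7 iovecs (the last one absorbs the remainder) to exercise iovec mapping.
 */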
static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct iovec *iovs;
	int iovcnt;
	void *md_buf;
	size_t iov_len, remaining;
	struct iovec *iov;
	void *buf;
	int i;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = io_info->src_buf;
		test_raid_bdev_io->buf_md = io_info->src_md_buf;
		buf = io_info->dest_buf;
		md_buf = io_info->dest_md_buf;
	} else {
		test_raid_bdev_io->buf = io_info->dest_buf;
		test_raid_bdev_io->buf_md = io_info->dest_md_buf;
		buf = io_info->src_buf;
		md_buf = io_info->src_md_buf;
	}

	iovcnt = 7;
	iovs = calloc(iovcnt, sizeof(*iovs));
	SPDK_CU_ASSERT_FATAL(iovs != NULL);

	remaining = io_info->num_blocks * blocklen;
	iov_len = remaining / iovcnt;

	for (i = 0; i < iovcnt; i++) {
		iov = &iovs[i];
		iov->iov_base = buf;
		iov->iov_len = iov_len;
		buf += iov_len;
		remaining -= iov_len;
	}
	iov->iov_len += remaining;

	raid_io = &test_raid_bdev_io->raid_io;

	raid_test_bdev_io_init(raid_io, raid_bdev, io_info->raid_ch, io_info->io_type,
			       io_info->offset_blocks, io_info->num_blocks, iovs, iovcnt, md_buf);

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

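/*
 * Mock base bdev submission: fail immediately when a submit-time error is
 * injected for this bdev, otherwise queue the bdev_io for completion by
 * process_io_completions().
 */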
static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

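/*
 * Complete all queued bdev_ios, failing those targeted by
 * TEST_BDEV_ERROR_COMPLETE. On TEST_BDEV_ERROR_NOMEM, clear the error,
 * re-run the queued wait entries and drain the resulting completions.
 */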
static void
process_io_completions(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	bool success;

	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);

		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
		    io_info->error.bdev == bdev_io->bdev) {
			success = false;
		} else {
			success = true;
		}

		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
	}

	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
		struct spdk_bdev *enomem_bdev = io_info->error.bdev;

		io_info->error.type = TEST_BDEV_ERROR_NONE;

		if (io_info->error.on_enomem_cb != NULL) {
			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
		}

		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
			waitq_entry->cb_fn(waitq_entry->cb_arg);
		}

		process_io_completions(io_info);
	} else {
		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
	}
}

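/*
 * Write mock: copy the chunk's data into the position it belongs to in the
 * test's data buffer, or capture it in the parity buffer, then queue the I/O.
 */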
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid5f_info *r5f_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	uint64_t data_offset;
	struct iovec dest;
	void *dest_md_buf = NULL;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	r5f_info = io_info->r5f_info;
	raid_bdev = r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		dest.iov_base = io_info->parity_buf;
		if (md_buf != NULL) {
			dest_md_buf = io_info->parity_md_buf;
		}
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		data_offset = data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		dest.iov_base = test_raid_bdev_io->buf + data_offset;
		if (md_buf != NULL) {
			data_offset = (data_offset >> r5f_info->blocklen_shift) * raid_bdev->bdev.md_len;
			dest_md_buf = test_raid_bdev_io->buf_md + data_offset;
		}
	}
	dest.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(iov, iovcnt, &dest, 1);
	if (md_buf != NULL) {
		memcpy(dest_md_buf, md_buf, num_blocks * raid_bdev->bdev.md_len);
	}

submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

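/*
 * Serve the per-chunk reads of a degraded (reconstruct) request from the
 * prepared surviving-stripe and reference parity buffers.
 */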
static int
spdk_bdev_readv_blocks_degraded(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx = 0;
	void *buf, *buf_md;
	struct iovec src;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		buf = io_info->reference_parity;
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		buf = io_info->degraded_buf +
		      data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	}
	src.iov_base = buf + (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.blocklen;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		if (chunk == stripe_req->parity_chunk) {
			buf_md = io_info->reference_md_parity;
		} else {
			buf_md = io_info->degraded_md_buf +
				 data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.md_len;
		}
		buf_md += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.md_len;
		memcpy(md_buf, buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					       cb_arg);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			    uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					       num_blocks, cb, cb_arg);
}

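/*
 * Read mock: per-chunk reconstruct reads are routed to the degraded helper
 * above; regular reads are served from the test's expected data buffer.
 */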
int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md_buf,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct iovec src;

	if (cb == raid5f_chunk_complete_bdev_io) {
		/* cb_arg is a struct chunk * in this case, not a raid_bdev_io * */
		return spdk_bdev_readv_blocks_degraded(desc, ch, iov, iovcnt, md_buf, offset_blocks,
						       num_blocks, cb, cb_arg);
	}

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);

	raid_io = cb_arg;
	raid_bdev = raid_io->raid_bdev;
	test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io, raid_io);

	src.iov_base = test_raid_bdev_io->buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, test_raid_bdev_io->buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					      cb_arg);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			   uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					      num_blocks, cb, cb_arg);
}

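/* XOR size bytes of b into a; used to build the reference parity. */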
static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

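/*
 * Submit a full-stripe write and drain completions. In degraded mode the
 * chunk mapped to the missing base bdev is never written, so its expected
 * contents are copied into dest_buf before the caller compares buffers;
 * written parity is checked against the reference on success.
 */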
static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	poll_threads();

	process_io_completions(io_info);

	if (g_test_degraded) {
		struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
		uint8_t p_idx;
		uint8_t i;
		off_t offset;
		uint32_t strip_len;

		for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
			if (!raid_bdev_channel_get_base_channel(io_info->raid_ch, i)) {
				break;
			}
		}

		SPDK_CU_ASSERT_FATAL(i != raid_bdev->num_base_bdevs);

		p_idx = raid5f_stripe_parity_chunk_index(raid_bdev, io_info->stripe_index);

		if (i == p_idx) {
			return;
		}

		if (i >= p_idx) {
			i--;
		}

		strip_len = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		offset = i * strip_len;

		memcpy(io_info->dest_buf + offset, io_info->src_buf + offset, strip_len);
		if (io_info->dest_md_buf) {
			strip_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;
			offset = i * strip_len;
			memcpy(io_info->dest_md_buf + offset, io_info->src_md_buf + offset, strip_len);
		}
	}

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		if (io_info->parity_buf) {
			CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
					 io_info->parity_buf_size) == 0);
		}
		if (io_info->parity_md_buf) {
			CU_ASSERT(memcmp(io_info->parity_md_buf, io_info->reference_md_parity,
					 io_info->parity_md_buf_size) == 0);
		}
	}
}

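/* Submit a read of at most one strip and drain completions. */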
static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks <= io_info->r5f_info->raid_bdev->strip_size);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);

	if (g_test_degraded) {
		/* Poll until the reconstruct read's XOR completion callback runs. */
		poll_threads();
	}
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	free(io_info->src_buf);
	free(io_info->dest_buf);
	free(io_info->src_md_buf);
	free(io_info->dest_md_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
	free(io_info->parity_md_buf);
	free(io_info->reference_md_parity);
	free(io_info->degraded_buf);
	free(io_info->degraded_md_buf);
}

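/*
 * Set up a test I/O: source blocks carry their block index in the first
 * 8 bytes and 0xff elsewhere; metadata bytes carry their buffer offset.
 */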
static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t stripe_index, uint64_t stripe_offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	void *src_md_buf, *dest_md_buf;
	size_t buf_size = num_blocks * blocklen;
	size_t buf_md_size = raid_bdev->bdev.md_interleave ? 0 : num_blocks * raid_bdev->bdev.md_len;
	uint64_t block;
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(stripe_offset_blocks < r5f_info->stripe_blocks);

	memset(io_info, 0, sizeof(*io_info));

	if (buf_size) {
		src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_buf != NULL);

		dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

		memset(src_buf, 0xff, buf_size);
		for (block = 0; block < num_blocks; block++) {
			*((uint64_t *)(src_buf + block * blocklen)) = block;
		}
	} else {
		src_buf = NULL;
		dest_buf = NULL;
	}

	if (buf_md_size) {
		src_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_md_buf != NULL);

		dest_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_md_buf != NULL);

		memset(src_md_buf, 0xff, buf_md_size);
		for (i = 0; i < buf_md_size; i++) {
			*((uint8_t *)(src_md_buf + i)) = (uint8_t)i;
		}
	} else {
		src_md_buf = NULL;
		dest_md_buf = NULL;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->stripe_index = stripe_index;
	io_info->offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	io_info->stripe_offset_blocks = stripe_offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->src_md_buf = src_md_buf;
	io_info->dest_md_buf = dest_md_buf;
	io_info->buf_size = buf_size;
	io_info->buf_md_size = buf_md_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

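/*
 * Allocate a capture buffer for the parity the module writes and compute the
 * reference parity (and metadata parity) by XORing the data chunks of src.
 */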
static void
io_info_setup_parity(struct raid_io_info *io_info, void *src, void *src_md)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	unsigned i;

	io_info->parity_buf_size = strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		xor_block(io_info->reference_parity, src, strip_len);
		src += strip_len;
	}

	if (src_md) {
		size_t strip_md_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;

		SPDK_CU_ASSERT_FATAL(raid_bdev->bdev.md_interleave == 0);

		io_info->parity_md_buf_size = strip_md_len;
		io_info->parity_md_buf = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->parity_md_buf != NULL);

		io_info->reference_md_parity = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->reference_md_parity != NULL);

		for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
			xor_block(io_info->reference_md_parity, src_md, strip_md_len);
			src_md += strip_md_len;
		}
	}
}

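/*
 * Build the surviving-stripe contents for a degraded read: fill the stripe
 * with filler bytes, place the expected data, derive the reference parity,
 * then clobber the data region so that it can only be recovered by
 * reconstruction from the remaining chunks.
 */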
static void
io_info_setup_degraded(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	uint32_t md_len = raid_bdev->bdev.md_interleave ? 0 : raid_bdev->bdev.md_len;
	size_t stripe_len = r5f_info->stripe_blocks * blocklen;
	size_t stripe_md_len = r5f_info->stripe_blocks * md_len;

	io_info->degraded_buf = malloc(stripe_len);
	SPDK_CU_ASSERT_FATAL(io_info->degraded_buf != NULL);

	memset(io_info->degraded_buf, 0xab, stripe_len);

	memcpy(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       io_info->src_buf, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		io_info->degraded_md_buf = malloc(stripe_md_len);
		SPDK_CU_ASSERT_FATAL(io_info->degraded_md_buf != NULL);

		memset(io_info->degraded_md_buf, 0xab, stripe_md_len);

		memcpy(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       io_info->src_md_buf, io_info->num_blocks * md_len);
	}

	io_info_setup_parity(io_info, io_info->degraded_buf, io_info->degraded_md_buf);

	memset(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       0xcd, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		memset(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       0xcd, io_info->num_blocks * md_len);
	}
}

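/*
 * Run one read or write against the raid5f instance and verify that the
 * data (and metadata) read back or written out matches the source.
 */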
static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	struct raid_io_info io_info;

	init_io_info(&io_info, r5f_info, raid_ch, io_type, stripe_index, stripe_offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (g_test_degraded) {
			io_info_setup_degraded(&io_info);
		}
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info, io_info.src_buf, io_info.src_md_buf);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);
	if (io_info.buf_md_size) {
		CU_ASSERT(memcmp(io_info.src_md_buf, io_info.dest_md_buf, io_info.buf_md_size) == 0);
	}

	deinit_io_info(&io_info);
}

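/*
 * Run test_fn on a fresh raid5f instance for every parameter set. In
 * degraded mode the first base bdev's channel is removed to simulate a
 * missing base bdev.
 */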
static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			   struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel *raid_ch;

		r5f_info = create_raid5f(params);
		raid_ch = raid_test_create_io_channel(r5f_info->raid_bdev);

		if (g_test_degraded) {
			raid_ch->_base_channels[0] = NULL;
		}

		test_fn(r5f_info->raid_bdev, raid_ch);

		raid_test_destroy_io_channel(raid_ch);
		delete_raid5f(r5f_info);
	}
}

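/*
 * Iterate over as many stripes as there are base bdevs (so that every parity
 * chunk position is exercised), capped at the array's total stripe count.
 */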
#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

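/*
 * Read single blocks, whole strips, and strip interiors at every data chunk
 * offset of each tested stripe.
 */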
static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	uint64_t stripe_index;
	unsigned int i;

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		uint64_t stripe_offset = i * strip_size;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, 1);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, strip_size);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + strip_size - 1, 1);
			if (strip_size <= 2) {
				continue;
			}
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + 1, strip_size - 2);
		}
	}
}

static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

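/*
 * Feed a write stripe request iovecs that straddle chunk boundaries and
 * verify that raid5f_stripe_request_map_iovecs() splits them per data chunk.
 */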
static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_io_channel *r5ch = raid_bdev_channel_get_module_ctx(raid_ch);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_bdev_io raid_io = {};
	struct stripe_request *stripe_req;
	struct chunk *chunk;
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	raid_io.raid_bdev = raid_bdev;
	raid_io.iovs = iovs;
	raid_io.iovcnt = iovcnt;

	stripe_req = raid5f_stripe_request_alloc(r5ch, STRIPE_REQ_WRITE);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = &raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	raid5f_stripe_request_free(stripe_req);
}

static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}

static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

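/*
 * Inject each error type on every base bdev in turn: ENOMEM must be retried
 * to success, while submit and completion errors must fail the raid I/O.
 */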
static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->desc->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

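/*
 * While retrying after ENOMEM on one base bdev, inject a second error on the
 * last base bdev to exercise error handling in the retry path.
 */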
struct chunk_write_error_with_enomem_ctx {
	enum test_bdev_error_type error_type;
	struct spdk_bdev *bdev;
};

static void
chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
{
	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;

	io_info->error.type = ctx->error_type;
	io_info->error.bdev = ctx->bdev;
}

static void
__test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;
	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			struct raid_base_bdev_info *base_bdev_info_last =
					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];

			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				if (base_bdev_info == base_bdev_info_last) {
					continue;
				}

				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
				io_info.error.bdev = base_bdev_info->desc->bdev;
				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
				on_enomem_cb_ctx.error_type = error_type;
				on_enomem_cb_ctx.bdev = base_bdev_info_last->desc->bdev;

				test_raid5f_write_request(&io_info);

				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error_with_enomem(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
}

static void
test_raid5f_submit_full_stripe_write_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
test_raid5f_submit_read_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite_with_setup_and_teardown("raid5f", test_suite_init, test_suite_cleanup,
			test_setup, NULL);
	CU_ADD_TEST(suite, test_raid5f_start);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request_degraded);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request_degraded);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}