/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk/xor.h"

#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid5f.c"
#include "../common.c"

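/*
 * g_accel_p is a fake io_device pointer used to register the stub accel
 * channel below. g_test_degraded makes the tests run with one base bdev
 * channel removed, exercising the degraded (reconstruct) code paths.
 */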
static void *g_accel_p = (void *)0xdeadbeaf;
static bool g_test_degraded;

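/* Stub out raid framework and accel callbacks that are irrelevant to the raid5f logic under test. */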
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB_V(raid_bdev_process_request_complete, (struct raid_bdev_process_request *process_req,
		int status));
DEFINE_STUB_V(raid_bdev_io_init, (struct raid_bdev_io *raid_io,
				  struct raid_bdev_io_channel *raid_ch,
				  enum spdk_bdev_io_type type, uint64_t offset_blocks,
				  uint64_t num_blocks, struct iovec *iovs, int iovcnt, void *md_buf,
				  struct spdk_memory_domain *memory_domain, void *memory_domain_ctx));

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

struct xor_ctx {
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;
};

static void
finish_xor(void *_ctx)
{
	struct xor_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, 0);

	free(ctx);
}

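/*
 * Stub of the accel framework's XOR operation: the XOR itself is computed
 * synchronously with spdk_xor_gen(), but the completion callback is deferred
 * via a thread message to preserve the asynchronous contract of the real API.
 */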
int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct xor_ctx *ctx;

	ctx = malloc(sizeof(*ctx));
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	SPDK_CU_ASSERT_FATAL(spdk_xor_gen(dst, sources, nsrcs, nbytes) == 0);

	spdk_thread_send_msg(spdk_get_thread(), finish_xor, ctx);

	return 0;
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

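/*
 * Build the list of raid configurations to test: the cartesian product of
 * base bdev count, block count, block size, strip size and metadata type.
 * Combinations where the strip size rounds to zero blocks or exceeds the
 * base bdev block count are skipped.
 */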
static int
test_suite_init(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	enum raid_params_md_type md_type_values[] = { RAID_PARAMS_MD_NONE, RAID_PARAMS_MD_SEPARATE };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	enum raid_params_md_type *md_type;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values) *
		       SPDK_COUNTOF(md_type_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					ARRAY_FOR_EACH(md_type_values, md_type) {
						struct raid_params params = {
							.num_base_bdevs = *num_base_bdevs,
							.base_bdev_blockcnt = *base_bdev_blockcnt,
							.base_bdev_blocklen = *base_bdev_blocklen,
							.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen,
							.md_type = *md_type,
						};
						if (params.strip_size == 0 ||
						    params.strip_size > params.base_bdev_blockcnt) {
							continue;
						}
						raid_test_params_add(&params);
					}
				}
			}
		}
	}

	init_accel();

	return 0;
}

static int
test_suite_cleanup(void)
{
	fini_accel();
	raid_test_params_free();
	return 0;
}

static void
test_setup(void)
{
	g_test_degraded = false;
}

static struct raid5f_info *
create_raid5f(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid5f_module);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

static void
test_raid5f_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		SPDK_CU_ASSERT_FATAL(r5f_info != NULL);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t stripe_index;
	uint64_t offset_blocks;
	uint64_t stripe_offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	void *src_md_buf;
	void *dest_md_buf;
	size_t buf_size;
	size_t buf_md_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	void *parity_md_buf;
	void *reference_md_parity;
	size_t parity_md_buf_size;
	void *degraded_buf;
	void *degraded_md_buf;
	enum spdk_bdev_io_status status;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

struct test_raid_bdev_io {
	struct raid_bdev_io raid_io;
	struct raid_io_info *io_info;
	void *buf;
	void *buf_md;
};

void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct raid_io_info *io_info = test_raid_bdev_io->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

static void
raid_test_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);

	test_raid_bdev_io->io_info->status = status;

	free(raid_io->iovs);
	free(test_raid_bdev_io);
}

static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct iovec *iovs;
	int iovcnt;
	void *md_buf;
	size_t iov_len, remaining;
	struct iovec *iov;
	void *buf;
	int i;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = io_info->src_buf;
		test_raid_bdev_io->buf_md = io_info->src_md_buf;
		buf = io_info->dest_buf;
		md_buf = io_info->dest_md_buf;
	} else {
		test_raid_bdev_io->buf = io_info->dest_buf;
		test_raid_bdev_io->buf_md = io_info->dest_md_buf;
		buf = io_info->src_buf;
		md_buf = io_info->src_md_buf;
	}

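	/*
	 * Split the buffer across an arbitrary odd number of iovecs so that
	 * iovec boundaries rarely coincide with chunk boundaries; the
	 * remainder of the integer division is added to the last iovec below.
	 */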
	iovcnt = 7;
	iovs = calloc(iovcnt, sizeof(*iovs));
	SPDK_CU_ASSERT_FATAL(iovs != NULL);

	remaining = io_info->num_blocks * blocklen;
	iov_len = remaining / iovcnt;

	for (i = 0; i < iovcnt; i++) {
		iov = &iovs[i];
		iov->iov_base = buf;
		iov->iov_len = iov_len;
		buf += iov_len;
		remaining -= iov_len;
	}
	iov->iov_len += remaining;

	raid_io = &test_raid_bdev_io->raid_io;

	raid_test_bdev_io_init(raid_io, raid_bdev, io_info->raid_ch, io_info->io_type,
			       io_info->offset_blocks, io_info->num_blocks, iovs, iovcnt, md_buf);

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

static void
process_io_completions(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	bool success;

	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);

		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
		    io_info->error.bdev == bdev_io->bdev) {
			success = false;
		} else {
			success = true;
		}

		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
	}

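	/*
	 * For ENOMEM tests, clear the injected error and re-drive the queued
	 * io_wait entries, then recurse to complete the retried I/O.
	 */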
	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
		struct spdk_bdev *enomem_bdev = io_info->error.bdev;

		io_info->error.type = TEST_BDEV_ERROR_NONE;

		if (io_info->error.on_enomem_cb != NULL) {
			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
		}

		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
			waitq_entry->cb_fn(waitq_entry->cb_arg);
		}

		process_io_completions(io_info);
	} else {
		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
	}
}

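/*
 * Convert a data buffer byte offset to the corresponding metadata buffer
 * byte offset. For example (illustrative values), with 4096-byte blocks
 * (blocklen_shift == 12) and 8 bytes of metadata per block, a data offset
 * of 8192 bytes maps to (8192 >> 12) * 8 == 16 bytes.
 */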
#define DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset) (((data_offset) >> (raid_bdev)->blocklen_shift) * (raid_bdev)->bdev.md_len)

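/*
 * Stub base bdev write: instead of performing I/O, copy the chunk's data
 * (or parity) into the test buffers so the result can later be verified
 * against the reference parity computed by the test.
 */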
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	uint64_t data_offset;
	struct iovec dest;
	void *dest_md_buf;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		dest.iov_base = io_info->parity_buf;
		if (md_buf != NULL) {
			dest_md_buf = io_info->parity_md_buf;
		}
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		data_offset = data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		dest.iov_base = test_raid_bdev_io->buf + data_offset;
		if (md_buf != NULL) {
			data_offset = DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset);
			dest_md_buf = test_raid_bdev_io->buf_md + data_offset;
		}
	}
	dest.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(iov, iovcnt, &dest, 1);
	if (md_buf != NULL) {
		memcpy(dest_md_buf, md_buf, num_blocks * raid_bdev->bdev.md_len);
	}

submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

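/*
 * Serve reads issued during reconstruction from the test's reference parity
 * and degraded data buffers, mimicking the surviving base bdevs.
 */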
static int
spdk_bdev_readv_blocks_degraded(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	void *buf, *buf_md;
	struct iovec src;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		buf = io_info->reference_parity;
		buf_md = io_info->reference_md_parity;
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		buf = io_info->degraded_buf +
		      data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		buf_md = io_info->degraded_md_buf +
			 data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.md_len;
	}

	buf += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.blocklen;
	buf_md += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.md_len;

	src.iov_base = buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					       cb_arg);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			    uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					       num_blocks, cb, cb_arg);
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md_buf,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct iovec src;

	if (cb == raid5f_chunk_complete_bdev_io) {
		return spdk_bdev_readv_blocks_degraded(desc, ch, iov, iovcnt, md_buf, offset_blocks,
						       num_blocks, cb, cb_arg);
	}

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);

	src.iov_base = test_raid_bdev_io->buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, test_raid_bdev_io->buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					      cb_arg);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			   uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					      num_blocks, cb, cb_arg);
}

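/*
 * Byte-wise in-place XOR (a[i] ^= b[i]), used to compute the reference
 * parity independently of spdk_xor_gen().
 */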
static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	poll_threads();

	process_io_completions(io_info);

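	/*
	 * In degraded mode the chunk targeting the missing base bdev is never
	 * written, so copy the corresponding strip from the source buffer to
	 * the destination buffer before the two are compared.
	 */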
	if (g_test_degraded) {
		struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
		uint8_t p_idx;
		uint8_t i;
		off_t offset;
		uint32_t strip_len;

		for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
			if (!raid_bdev_channel_get_base_channel(io_info->raid_ch, i)) {
				break;
			}
		}

		SPDK_CU_ASSERT_FATAL(i != raid_bdev->num_base_bdevs);

		p_idx = raid5f_stripe_parity_chunk_index(raid_bdev, io_info->stripe_index);

		if (i == p_idx) {
			return;
		}

		if (i >= p_idx) {
			i--;
		}

		strip_len = raid_bdev->strip_size_kb * 1024;
		offset = i * strip_len;

		memcpy(io_info->dest_buf + offset, io_info->src_buf + offset, strip_len);
		if (io_info->dest_md_buf) {
			strip_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;
			offset = i * strip_len;
			memcpy(io_info->dest_md_buf + offset, io_info->src_md_buf + offset, strip_len);
		}
	}

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		if (io_info->parity_buf) {
			CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
					 io_info->parity_buf_size) == 0);
		}
		if (io_info->parity_md_buf) {
			CU_ASSERT(memcmp(io_info->parity_md_buf, io_info->reference_md_parity,
					 io_info->parity_md_buf_size) == 0);
		}
	}
}

static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks <= io_info->r5f_info->raid_bdev->strip_size);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);

	if (g_test_degraded) {
		/* for the reconstruct read xor callback */
		poll_threads();
	}
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	free(io_info->src_buf);
	free(io_info->dest_buf);
	free(io_info->src_md_buf);
	free(io_info->dest_md_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
	free(io_info->parity_md_buf);
	free(io_info->reference_md_parity);
	free(io_info->degraded_buf);
	free(io_info->degraded_md_buf);
}

static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t stripe_index, uint64_t stripe_offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	void *src_md_buf, *dest_md_buf;
	size_t buf_size = num_blocks * blocklen;
	size_t buf_md_size = num_blocks * raid_bdev->bdev.md_len;
	uint64_t block;
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(stripe_offset_blocks < r5f_info->stripe_blocks);

	memset(io_info, 0, sizeof(*io_info));

	if (buf_size) {
		src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_buf != NULL);

		dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

		memset(src_buf, 0xff, buf_size);
		for (block = 0; block < num_blocks; block++) {
			*((uint64_t *)(src_buf + block * blocklen)) = block;
		}
	} else {
		src_buf = NULL;
		dest_buf = NULL;
	}

	if (buf_md_size) {
		src_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_md_buf != NULL);

		dest_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_md_buf != NULL);

		memset(src_md_buf, 0xff, buf_md_size);
		for (i = 0; i < buf_md_size; i++) {
			*((uint8_t *)(src_md_buf + i)) = (uint8_t)i;
		}
	} else {
		src_md_buf = NULL;
		dest_md_buf = NULL;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->stripe_index = stripe_index;
	io_info->offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	io_info->stripe_offset_blocks = stripe_offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->src_md_buf = src_md_buf;
	io_info->dest_md_buf = dest_md_buf;
	io_info->buf_size = buf_size;
	io_info->buf_md_size = buf_md_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

static void
io_info_setup_parity(struct raid_io_info *io_info, void *src, void *src_md)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	unsigned i;

	io_info->parity_buf_size = strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		xor_block(io_info->reference_parity, src, strip_len);
		src += strip_len;
	}

	if (src_md) {
		size_t strip_md_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;

		io_info->parity_md_buf_size = strip_md_len;
		io_info->parity_md_buf = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->parity_md_buf != NULL);

		io_info->reference_md_parity = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->reference_md_parity != NULL);

		for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
			xor_block(io_info->reference_md_parity, src_md, strip_md_len);
			src_md += strip_md_len;
		}
	}
}

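/*
 * Prepare buffers for a degraded read: fill the stripe with a 0xab pattern,
 * place the source data at the I/O offset, compute the reference parity,
 * then clobber the data area with 0xcd so that only a correct reconstruction
 * from the remaining chunks can produce the expected data.
 */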
static void
io_info_setup_degraded(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	uint32_t md_len = raid_bdev->bdev.md_len;
	size_t stripe_len = r5f_info->stripe_blocks * blocklen;
	size_t stripe_md_len = r5f_info->stripe_blocks * md_len;

	io_info->degraded_buf = malloc(stripe_len);
	SPDK_CU_ASSERT_FATAL(io_info->degraded_buf != NULL);

	memset(io_info->degraded_buf, 0xab, stripe_len);

	memcpy(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       io_info->src_buf, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		io_info->degraded_md_buf = malloc(stripe_md_len);
		SPDK_CU_ASSERT_FATAL(io_info->degraded_md_buf != NULL);

		memset(io_info->degraded_md_buf, 0xab, stripe_md_len);

		memcpy(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       io_info->src_md_buf, io_info->num_blocks * md_len);
	}

	io_info_setup_parity(io_info, io_info->degraded_buf, io_info->degraded_md_buf);

	memset(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       0xcd, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		memset(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       0xcd, io_info->num_blocks * md_len);
	}
}

static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	struct raid_io_info io_info;

	init_io_info(&io_info, r5f_info, raid_ch, io_type, stripe_index, stripe_offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (g_test_degraded) {
			io_info_setup_degraded(&io_info);
		}
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info, io_info.src_buf, io_info.src_md_buf);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);
	if (io_info.buf_md_size) {
		CU_ASSERT(memcmp(io_info.src_md_buf, io_info.dest_md_buf, io_info.buf_md_size) == 0);
	}

	deinit_io_info(&io_info);
}

static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			   struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel *raid_ch;

		r5f_info = create_raid5f(params);
		raid_ch = raid_test_create_io_channel(r5f_info->raid_bdev);

		if (g_test_degraded) {
			raid_ch->_base_channels[0] = NULL;
		}

		test_fn(r5f_info->raid_bdev, raid_ch);

		raid_test_destroy_io_channel(raid_ch);
		delete_raid5f(r5f_info);
	}
}

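/*
 * Iterate over the first min(num_base_bdevs, total_stripes) stripes, enough
 * to cover every rotation of the parity chunk across the base bdevs.
 */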
#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	uint64_t stripe_index;
	unsigned int i;

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		uint64_t stripe_offset = i * strip_size;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, 1);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, strip_size);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + strip_size - 1, 1);
			if (strip_size <= 2) {
				continue;
			}
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + 1, strip_size - 2);
		}
	}
}

static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_io_channel *r5ch = raid_bdev_channel_get_module_ctx(raid_ch);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_bdev_io raid_io = {};
	struct stripe_request *stripe_req;
	struct chunk *chunk;
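	/*
	 * iovec layout chosen so that chunk boundaries fall inside iovecs:
	 * the second strip is split across iovs[1] and iovs[2], and iovs[2]
	 * spans multiple strips. The base addresses are illustrative and
	 * never dereferenced; only the resulting mapping is checked.
	 */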
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	raid_io.raid_bdev = raid_bdev;
	raid_io.iovs = iovs;
	raid_io.iovcnt = iovcnt;

	stripe_req = raid5f_stripe_request_alloc(r5ch, STRIPE_REQ_WRITE);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = &raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	raid5f_stripe_request_free(stripe_req);
}

static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}

static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->desc->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

struct chunk_write_error_with_enomem_ctx {
	enum test_bdev_error_type error_type;
	struct spdk_bdev *bdev;
};

static void
chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
{
	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;

	io_info->error.type = ctx->error_type;
	io_info->error.bdev = ctx->bdev;
}

static void
__test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;
	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			struct raid_base_bdev_info *base_bdev_info_last =
					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];

			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				if (base_bdev_info == base_bdev_info_last) {
					continue;
				}

				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
				io_info.error.bdev = base_bdev_info->desc->bdev;
				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
				on_enomem_cb_ctx.error_type = error_type;
				on_enomem_cb_ctx.bdev = base_bdev_info_last->desc->bdev;

				test_raid5f_write_request(&io_info);

				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error_with_enomem(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
}

static void
test_raid5f_submit_full_stripe_write_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
test_raid5f_submit_read_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite_with_setup_and_teardown("raid5f", test_suite_init, test_suite_cleanup,
			test_setup, NULL);
	CU_ADD_TEST(suite, test_raid5f_start);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request_degraded);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request_degraded);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}