/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk/xor.h"

#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid5f.c"
#include "../common.c"

static void *g_accel_p = (void *)0xdeadbeaf;
static bool g_test_degraded;

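/*
 * Stub out the raid framework and accel hooks that these tests do not
 * exercise directly; only the symbols below need to link.
 */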
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB_V(raid_bdev_process_request_complete, (struct raid_bdev_process_request *process_req,
		int status));
DEFINE_STUB_V(raid_bdev_io_init, (struct raid_bdev_io *raid_io,
				  struct raid_bdev_io_channel *raid_ch,
				  enum spdk_bdev_io_type type, uint64_t offset_blocks,
				  uint64_t num_blocks, struct iovec *iovs, int iovcnt, void *md_buf,
				  struct spdk_memory_domain *memory_domain, void *memory_domain_ctx));

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

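/*
 * Minimal fake of the accel XOR API: the XOR is computed synchronously
 * with spdk_xor_gen() and the completion callback is deferred through a
 * thread message to preserve the asynchronous calling convention.
 */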
struct xor_ctx {
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;
};

static void
finish_xor(void *_ctx)
{
	struct xor_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, 0);

	free(ctx);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct xor_ctx *ctx;

	ctx = malloc(sizeof(*ctx));
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	SPDK_CU_ASSERT_FATAL(spdk_xor_gen(dst, sources, nsrcs, nbytes) == 0);

	spdk_thread_send_msg(spdk_get_thread(), finish_xor, ctx);

	return 0;
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

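/*
 * Build the test parameter matrix from every combination of base bdev
 * count, block count, block length, strip size and metadata length,
 * skipping combinations where the strip size rounds down to zero or
 * exceeds the base bdev block count.
 */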
static int
test_suite_init(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint32_t md_len_values[] = { 0, 64 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	uint32_t *md_len;
	struct raid_params params;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values) *
		       SPDK_COUNTOF(md_len_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					ARRAY_FOR_EACH(md_len_values, md_len) {
						params.num_base_bdevs = *num_base_bdevs;
						params.base_bdev_blockcnt = *base_bdev_blockcnt;
						params.base_bdev_blocklen = *base_bdev_blocklen;
						params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
						params.md_len = *md_len;
						if (params.strip_size == 0 ||
						    params.strip_size > *base_bdev_blockcnt) {
							continue;
						}
						raid_test_params_add(&params);
					}
				}
			}
		}
	}

	init_accel();

	return 0;
}

static int
test_suite_cleanup(void)
{
	fini_accel();
	raid_test_params_free();
	return 0;
}

static void
test_setup(void)
{
	g_test_degraded = false;
}

static struct raid5f_info *
create_raid5f(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid5f_module);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

static void
test_raid5f_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		SPDK_CU_ASSERT_FATAL(r5f_info != NULL);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

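/* Error injection modes for the fake base bdev I/O layer below. */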
enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

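/*
 * Per-test I/O context: source/destination data and metadata buffers,
 * the expected (reference) parity, queues of in-flight fake bdev I/Os,
 * and the error to inject, if any.
 */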
struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t stripe_index;
	uint64_t offset_blocks;
	uint64_t stripe_offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	void *src_md_buf;
	void *dest_md_buf;
	size_t buf_size;
	size_t buf_md_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	void *parity_md_buf;
	void *reference_md_parity;
	size_t parity_md_buf_size;
	void *degraded_buf;
	void *degraded_md_buf;
	enum spdk_bdev_io_status status;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

struct test_raid_bdev_io {
	struct raid_bdev_io raid_io;
	struct raid_io_info *io_info;
	void *buf;
	void *buf_md;
};

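/*
 * Test implementation of the raid framework's I/O wait hook: record the
 * waiter so that process_io_completions() can retry it after a simulated
 * -ENOMEM.
 */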
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct raid_io_info *io_info = test_raid_bdev_io->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

static void
raid_test_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);

	test_raid_bdev_io->io_info->status = status;

	free(raid_io->iovs);
	free(test_raid_bdev_io);
}

static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct iovec *iovs;
	int iovcnt;
	void *md_buf;
	size_t iov_len, remaining;
	struct iovec *iov;
	void *buf;
	int i;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = io_info->src_buf;
		test_raid_bdev_io->buf_md = io_info->src_md_buf;
		buf = io_info->dest_buf;
		md_buf = io_info->dest_md_buf;
	} else {
		test_raid_bdev_io->buf = io_info->dest_buf;
		test_raid_bdev_io->buf_md = io_info->dest_md_buf;
		buf = io_info->src_buf;
		md_buf = io_info->src_md_buf;
	}

	iovcnt = 7;
	iovs = calloc(iovcnt, sizeof(*iovs));
	SPDK_CU_ASSERT_FATAL(iovs != NULL);

	remaining = io_info->num_blocks * blocklen;
	iov_len = remaining / iovcnt;

	for (i = 0; i < iovcnt; i++) {
		iov = &iovs[i];
		iov->iov_base = buf;
		iov->iov_len = iov_len;
		buf += iov_len;
		remaining -= iov_len;
	}
	iov->iov_len += remaining;

	raid_io = &test_raid_bdev_io->raid_io;

	raid_test_bdev_io_init(raid_io, raid_bdev, io_info->raid_ch, io_info->io_type,
			       io_info->offset_blocks, io_info->num_blocks, iovs, iovcnt, md_buf);

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

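/*
 * Queue a fake base bdev I/O for later completion, or fail it up front
 * when a submit or ENOMEM error is injected for the target bdev.
 */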
static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

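/*
 * Complete all queued base bdev I/Os, failing those aimed at the bdev
 * with an injected completion error. After a simulated -ENOMEM, replay
 * the wait queue once and process the resubmitted I/Os recursively.
 */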
static void
process_io_completions(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	bool success;

	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);

		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
		    io_info->error.bdev == bdev_io->bdev) {
			success = false;
		} else {
			success = true;
		}

		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
	}

	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
		struct spdk_bdev *enomem_bdev = io_info->error.bdev;

		io_info->error.type = TEST_BDEV_ERROR_NONE;

		if (io_info->error.on_enomem_cb != NULL) {
			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
		}

		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
			waitq_entry->cb_fn(waitq_entry->cb_arg);
		}

		process_io_completions(io_info);
	} else {
		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
	}
}

#define DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset) ((data_offset >> raid_bdev->blocklen_shift) * raid_bdev->bdev.md_len)

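/*
 * Fake write path: capture the written data into the test's destination
 * buffer, or into the parity capture buffer when the target is the
 * stripe's parity chunk, then queue the I/O for completion.
 */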
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	uint64_t data_offset;
	struct iovec dest;
	void *dest_md_buf;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		dest.iov_base = io_info->parity_buf;
		if (md_buf != NULL) {
			dest_md_buf = io_info->parity_md_buf;
		}
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		data_offset = data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		dest.iov_base = test_raid_bdev_io->buf + data_offset;
		if (md_buf != NULL) {
			data_offset = DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset);
			dest_md_buf = test_raid_bdev_io->buf_md + data_offset;
		}
	}
	dest.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(iov, iovcnt, &dest, 1);
	if (md_buf != NULL) {
		memcpy(dest_md_buf, md_buf, num_blocks * raid_bdev->bdev.md_len);
	}

submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

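/*
 * Degraded read path: reconstruct reads are served from the reference
 * parity and the prepared contents of the surviving data chunks.
 */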
static int
spdk_bdev_readv_blocks_degraded(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	void *buf, *buf_md;
	struct iovec src;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		buf = io_info->reference_parity;
		buf_md = io_info->reference_md_parity;
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		buf = io_info->degraded_buf +
		      data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		buf_md = io_info->degraded_md_buf +
			 data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.md_len;
	}

	buf += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.blocklen;
	buf_md += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.md_len;

	src.iov_base = buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					       cb_arg);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			    uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					       num_blocks, cb, cb_arg);
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md_buf,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct iovec src;

	if (cb == raid5f_chunk_complete_bdev_io) {
		return spdk_bdev_readv_blocks_degraded(desc, ch, iov, iovcnt, md_buf, offset_blocks,
						       num_blocks, cb, cb_arg);
	}

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);

	src.iov_base = test_raid_bdev_io->buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, test_raid_bdev_io->buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					      cb_arg);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			   uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					      num_blocks, cb, cb_arg);
}

static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

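/*
 * Submit a full-stripe write and complete it. In degraded mode the chunk
 * targeting the missing base bdev is never actually written, so its
 * expected contents are copied into the destination buffer by hand
 * before the buffers are compared.
 */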
static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	poll_threads();

	process_io_completions(io_info);

	if (g_test_degraded) {
		struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
		uint8_t p_idx;
		uint8_t i;
		off_t offset;
		uint32_t strip_len;

		for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
			if (!raid_bdev_channel_get_base_channel(io_info->raid_ch, i)) {
				break;
			}
		}

		SPDK_CU_ASSERT_FATAL(i != raid_bdev->num_base_bdevs);

		p_idx = raid5f_stripe_parity_chunk_index(raid_bdev, io_info->stripe_index);

		if (i == p_idx) {
			return;
		}

		if (i >= p_idx) {
			i--;
		}

		strip_len = raid_bdev->strip_size_kb * 1024;
		offset = i * strip_len;

		memcpy(io_info->dest_buf + offset, io_info->src_buf + offset, strip_len);
		if (io_info->dest_md_buf) {
			strip_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;
			offset = i * strip_len;
			memcpy(io_info->dest_md_buf + offset, io_info->src_md_buf + offset, strip_len);
		}
	}

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		if (io_info->parity_buf) {
			CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
					 io_info->parity_buf_size) == 0);
		}
		if (io_info->parity_md_buf) {
			CU_ASSERT(memcmp(io_info->parity_md_buf, io_info->reference_md_parity,
					 io_info->parity_md_buf_size) == 0);
		}
	}
}

static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks <= io_info->r5f_info->raid_bdev->strip_size);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);

	if (g_test_degraded) {
		/* for the reconstruct read xor callback */
		poll_threads();
	}
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	free(io_info->src_buf);
	free(io_info->dest_buf);
	free(io_info->src_md_buf);
	free(io_info->dest_md_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
	free(io_info->parity_md_buf);
	free(io_info->reference_md_parity);
	free(io_info->degraded_buf);
	free(io_info->degraded_md_buf);
}

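/*
 * Prepare source and destination data/metadata buffers for one test I/O.
 * The first bytes of each source block are stamped with the block index
 * so that misdirected reads and writes are detectable.
 */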
static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t stripe_index, uint64_t stripe_offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	void *src_md_buf, *dest_md_buf;
	size_t buf_size = num_blocks * blocklen;
	size_t buf_md_size = num_blocks * raid_bdev->bdev.md_len;
	uint64_t block;
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(stripe_offset_blocks < r5f_info->stripe_blocks);

	memset(io_info, 0, sizeof(*io_info));

	if (buf_size) {
		src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_buf != NULL);

		dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

		memset(src_buf, 0xff, buf_size);
		for (block = 0; block < num_blocks; block++) {
			*((uint64_t *)(src_buf + block * blocklen)) = block;
		}
	} else {
		src_buf = NULL;
		dest_buf = NULL;
	}

	if (buf_md_size) {
		src_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_md_buf != NULL);

		dest_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_md_buf != NULL);

		memset(src_md_buf, 0xff, buf_md_size);
		for (i = 0; i < buf_md_size; i++) {
			*((uint8_t *)(src_md_buf + i)) = (uint8_t)i;
		}
	} else {
		src_md_buf = NULL;
		dest_md_buf = NULL;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->stripe_index = stripe_index;
	io_info->offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	io_info->stripe_offset_blocks = stripe_offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->src_md_buf = src_md_buf;
	io_info->dest_md_buf = dest_md_buf;
	io_info->buf_size = buf_size;
	io_info->buf_md_size = buf_md_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

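/*
 * Compute the expected parity by XOR-ing the data chunks of the source
 * stripe; the write tests compare the captured parity against this.
 */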
static void
io_info_setup_parity(struct raid_io_info *io_info, void *src, void *src_md)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	unsigned i;

	io_info->parity_buf_size = strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		xor_block(io_info->reference_parity, src, strip_len);
		src += strip_len;
	}

	if (src_md) {
		size_t strip_md_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;

		io_info->parity_md_buf_size = strip_md_len;
		io_info->parity_md_buf = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->parity_md_buf != NULL);

		io_info->reference_md_parity = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->reference_md_parity != NULL);

		for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
			xor_block(io_info->reference_md_parity, src_md, strip_md_len);
			src_md += strip_md_len;
		}
	}
}

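/*
 * For degraded reads: populate the contents of the surviving stripe and
 * its matching parity, then clobber the region belonging to the missing
 * chunk so that only a correct reconstruction can return the original
 * data.
 */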
static void
io_info_setup_degraded(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	uint32_t md_len = raid_bdev->bdev.md_len;
	size_t stripe_len = r5f_info->stripe_blocks * blocklen;
	size_t stripe_md_len = r5f_info->stripe_blocks * md_len;

	io_info->degraded_buf = malloc(stripe_len);
	SPDK_CU_ASSERT_FATAL(io_info->degraded_buf != NULL);

	memset(io_info->degraded_buf, 0xab, stripe_len);

	memcpy(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       io_info->src_buf, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		io_info->degraded_md_buf = malloc(stripe_md_len);
		SPDK_CU_ASSERT_FATAL(io_info->degraded_md_buf != NULL);

		memset(io_info->degraded_md_buf, 0xab, stripe_md_len);

		memcpy(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       io_info->src_md_buf, io_info->num_blocks * md_len);
	}

	io_info_setup_parity(io_info, io_info->degraded_buf, io_info->degraded_md_buf);

	memset(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       0xcd, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		memset(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       0xcd, io_info->num_blocks * md_len);
	}
}

static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	struct raid_io_info io_info;

	init_io_info(&io_info, r5f_info, raid_ch, io_type, stripe_index, stripe_offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (g_test_degraded) {
			io_info_setup_degraded(&io_info);
		}
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info, io_info.src_buf, io_info.src_md_buf);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);
	if (io_info.buf_md_size) {
		CU_ASSERT(memcmp(io_info.src_md_buf, io_info.dest_md_buf, io_info.buf_md_size) == 0);
	}

	deinit_io_info(&io_info);
}

static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			   struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel *raid_ch;

		r5f_info = create_raid5f(params);
		raid_ch = raid_test_create_io_channel(r5f_info->raid_bdev);

		if (g_test_degraded) {
			raid_ch->_base_channels[0] = NULL;
		}

		test_fn(r5f_info->raid_bdev, raid_ch);

		raid_test_destroy_io_channel(raid_ch);
		delete_raid5f(r5f_info);
	}
}

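/*
 * Test one stripe per base bdev (bounded by the total stripe count);
 * parity placement rotates per stripe, so this should cover every parity
 * chunk position.
 */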
#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	uint64_t stripe_index;
	unsigned int i;

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		uint64_t stripe_offset = i * strip_size;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, 1);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, strip_size);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + strip_size - 1, 1);
			if (strip_size <= 2) {
				continue;
			}
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + 1, strip_size - 2);
		}
	}
}

static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_io_channel *r5ch = raid_bdev_channel_get_module_ctx(raid_ch);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_bdev_io raid_io = {};
	struct stripe_request *stripe_req;
	struct chunk *chunk;
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	raid_io.raid_bdev = raid_bdev;
	raid_io.iovs = iovs;
	raid_io.iovcnt = iovcnt;

	stripe_req = raid5f_stripe_request_alloc(r5ch, STRIPE_REQ_WRITE);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = &raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	raid5f_stripe_request_free(stripe_req);
}

static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}

static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->desc->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

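/*
 * From within the ENOMEM retry callback, inject a second error on the
 * last base bdev to verify that a request failing after a retry is still
 * completed with an error status.
 */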
struct chunk_write_error_with_enomem_ctx {
	enum test_bdev_error_type error_type;
	struct spdk_bdev *bdev;
};

static void
chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
{
	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;

	io_info->error.type = ctx->error_type;
	io_info->error.bdev = ctx->bdev;
}

static void
__test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;
	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			struct raid_base_bdev_info *base_bdev_info_last =
					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];

			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				if (base_bdev_info == base_bdev_info_last) {
					continue;
				}

				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
				io_info.error.bdev = base_bdev_info->desc->bdev;
				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
				on_enomem_cb_ctx.error_type = error_type;
				on_enomem_cb_ctx.bdev = base_bdev_info_last->desc->bdev;

				test_raid5f_write_request(&io_info);

				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error_with_enomem(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
}

static void
test_raid5f_submit_full_stripe_write_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
test_raid5f_submit_read_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite_with_setup_and_teardown("raid5f", test_suite_init, test_suite_cleanup,
			test_setup, NULL);
	CU_ADD_TEST(suite, test_raid5f_start);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request_degraded);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request_degraded);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}