/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk/xor.h"

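/*
 * The raid5f implementation and the shared raid test helpers are compiled
 * directly into this test binary so that static functions and internal
 * structures can be exercised without exposing them in public headers.
 */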
#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid5f.c"
#include "../common.c"

static void *g_accel_p = (void *)0xdeadbeaf;
static bool g_test_degraded;

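/* Stubs for raid/accel framework symbols that are irrelevant to these tests. */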
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void *
spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
{
	return bdev_io->u.bdev.md_buf;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

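/*
 * Mocked accel XOR: the parity is generated synchronously with spdk_xor_gen(),
 * but the completion callback is deferred via a thread message to mimic the
 * asynchronous behavior of the real accel framework.
 */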
struct xor_ctx {
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;
};

static void
finish_xor(void *_ctx)
{
	struct xor_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, 0);

	free(ctx);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct xor_ctx *ctx;

	ctx = malloc(sizeof(*ctx));
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	SPDK_CU_ASSERT_FATAL(spdk_xor_gen(dst, sources, nsrcs, nbytes) == 0);

	spdk_thread_send_msg(spdk_get_thread(), finish_xor, ctx);

	return 0;
}

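/*
 * Minimal reimplementations of the raid framework's completion helpers: the
 * final status is forwarded to the test's completion callback instead of
 * going through the full bdev layer.
 */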
void
raid_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);

	if (bdev_io->internal.cb) {
		bdev_io->internal.cb(bdev_io, status == SPDK_BDEV_IO_STATUS_SUCCESS, bdev_io->internal.caller_ctx);
	}
}

bool
raid_bdev_io_complete_part(struct raid_bdev_io *raid_io, uint64_t completed,
			   enum spdk_bdev_io_status status)
{
	assert(raid_io->base_bdev_io_remaining >= completed);
	raid_io->base_bdev_io_remaining -= completed;

	if (status != SPDK_BDEV_IO_STATUS_SUCCESS) {
		raid_io->base_bdev_io_status = status;
	}

	if (raid_io->base_bdev_io_remaining == 0) {
		raid_bdev_io_complete(raid_io, raid_io->base_bdev_io_status);
		return true;
	} else {
		return false;
	}
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

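/*
 * Build the global list of raid configurations to test: every combination of
 * base bdev count, block count, block length, strip size and metadata length,
 * skipping combinations where the strip size is zero or exceeds the base bdev
 * block count.
 */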
static int
test_suite_init(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint32_t md_len_values[] = { 0, 64 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	uint32_t *md_len;
	struct raid_params params;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values) *
		       SPDK_COUNTOF(md_len_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					ARRAY_FOR_EACH(md_len_values, md_len) {
						params.num_base_bdevs = *num_base_bdevs;
						params.base_bdev_blockcnt = *base_bdev_blockcnt;
						params.base_bdev_blocklen = *base_bdev_blocklen;
						params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
						params.md_len = *md_len;
						if (params.strip_size == 0 ||
						    params.strip_size > *base_bdev_blockcnt) {
							continue;
						}
						raid_test_params_add(&params);
					}
				}
			}
		}
	}

	init_accel();

	return 0;
}

static int
test_suite_cleanup(void)
{
	fini_accel();
	raid_test_params_free();
	return 0;
}

static void
test_setup(void)
{
	g_test_degraded = false;
}

static struct raid5f_info *
create_raid5f(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid5f_module);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

static void
test_raid5f_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		SPDK_CU_ASSERT_FATAL(r5f_info != NULL);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

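/* Error injection modes applied to a single base bdev during a test I/O. */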
enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

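/*
 * Per-I/O test context: tracks the source/destination data and metadata
 * buffers, the expected (reference) parity, the simulated on-disk contents
 * for degraded tests, and the error injection settings.
 */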
struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t stripe_index;
	uint64_t offset_blocks;
	uint64_t stripe_offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	void *src_md_buf;
	void *dest_md_buf;
	size_t buf_size;
	size_t buf_md_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	void *parity_md_buf;
	void *reference_md_parity;
	size_t parity_md_buf_size;
	void *degraded_buf;
	void *degraded_md_buf;
	enum spdk_bdev_io_status status;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

struct test_raid_bdev_io {
	char bdev_io_buf[sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)];
	struct raid_io_info *io_info;
	void *buf;
	void *buf_md;
};

void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct raid_io_info *io_info;

	io_info = ((struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(raid_io))->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

static void
raid_bdev_io_completion_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_io_info *io_info = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		io_info->status = SPDK_BDEV_IO_STATUS_FAILED;
	} else {
		io_info->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	}
}

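/*
 * Construct a fake bdev I/O for the raid bdev. The payload is deliberately
 * split across several iovecs of equal length (with the remainder added to
 * the last one) to exercise the iovec mapping logic in the write path.
 */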
static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;
	size_t iov_len, remaining;
	struct iovec *iov;
	void *buf;
	int i;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io->bdev_io_buf == (char *)test_raid_bdev_io);
	bdev_io = (struct spdk_bdev_io *)test_raid_bdev_io->bdev_io_buf;
	bdev_io->bdev = &raid_bdev->bdev;
	bdev_io->type = io_info->io_type;
	bdev_io->u.bdev.offset_blocks = io_info->offset_blocks;
	bdev_io->u.bdev.num_blocks = io_info->num_blocks;
	bdev_io->internal.cb = raid_bdev_io_completion_cb;
	bdev_io->internal.caller_ctx = io_info;

	raid_io = (void *)bdev_io->driver_ctx;
	raid_io->raid_bdev = raid_bdev;
	raid_io->raid_ch = io_info->raid_ch;
	raid_io->base_bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = io_info->src_buf;
		test_raid_bdev_io->buf_md = io_info->src_md_buf;
		buf = io_info->dest_buf;
		bdev_io->u.bdev.md_buf = io_info->dest_md_buf;
	} else {
		test_raid_bdev_io->buf = io_info->dest_buf;
		test_raid_bdev_io->buf_md = io_info->dest_md_buf;
		buf = io_info->src_buf;
		bdev_io->u.bdev.md_buf = io_info->src_md_buf;
	}

	bdev_io->u.bdev.iovcnt = 7;
	bdev_io->u.bdev.iovs = calloc(bdev_io->u.bdev.iovcnt, sizeof(*bdev_io->u.bdev.iovs));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);

	remaining = io_info->num_blocks * blocklen;
	iov_len = remaining / bdev_io->u.bdev.iovcnt;

	for (i = 0; i < bdev_io->u.bdev.iovcnt; i++) {
		iov = &bdev_io->u.bdev.iovs[i];
		iov->iov_base = buf;
		iov->iov_len = iov_len;
		buf += iov_len;
		remaining -= iov_len;
	}
	iov->iov_len += remaining;

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io->u.bdev.iovs);
	free(bdev_io);
}

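/*
 * Common submission path for the mocked base bdev I/O calls: either fail
 * immediately according to the error injection settings or queue a completion
 * to be processed later by process_io_completions().
 */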
static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

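/*
 * Complete all queued base bdev I/Os, failing those targeting the bdev
 * selected for TEST_BDEV_ERROR_COMPLETE. If ENOMEM was injected, the
 * injection is then cleared and the wait queue is drained so that retried
 * I/Os can complete on the recursive call.
 */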
399 
400 static void
401 process_io_completions(struct raid_io_info *io_info)
402 {
403 	struct spdk_bdev_io *bdev_io;
404 	bool success;
405 
406 	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
407 		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);
408 
409 		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
410 		    io_info->error.bdev == bdev_io->bdev) {
411 			success = false;
412 		} else {
413 			success = true;
414 		}
415 
416 		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
417 	}
418 
419 	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
420 		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
421 		struct spdk_bdev *enomem_bdev = io_info->error.bdev;
422 
423 		io_info->error.type = TEST_BDEV_ERROR_NONE;
424 
425 		if (io_info->error.on_enomem_cb != NULL) {
426 			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
427 		}
428 
429 		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
430 			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
431 			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
432 			waitq_entry->cb_fn(waitq_entry->cb_arg);
433 		}
434 
435 		process_io_completions(io_info);
436 	} else {
437 		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
438 	}
439 }
440 
#define DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset) (((data_offset) >> (raid_bdev)->blocklen_shift) * (raid_bdev)->bdev.md_len)

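/*
 * Mocked base bdev write: instead of performing I/O, copy the payload into
 * the test's parity buffer (for the parity chunk) or into the expected offset
 * of the destination data buffer, so the result can be verified with memcmp.
 */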
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	uint64_t data_offset;
	struct iovec dest;
	void *dest_md_buf;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(stripe_req->raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		dest.iov_base = io_info->parity_buf;
		if (md_buf != NULL) {
			dest_md_buf = io_info->parity_md_buf;
		}
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		data_offset = data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		dest.iov_base = test_raid_bdev_io->buf + data_offset;
		if (md_buf != NULL) {
			data_offset = DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset);
			dest_md_buf = test_raid_bdev_io->buf_md + data_offset;
		}
	}
	dest.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(iov, iovcnt, &dest, 1);
	if (md_buf != NULL) {
		memcpy(dest_md_buf, md_buf, num_blocks * raid_bdev->bdev.md_len);
	}

submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

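/*
 * Serve a base bdev read issued by the degraded (reconstruct) path from the
 * precomputed surviving-chunk data and reference parity.
 */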
static int
spdk_bdev_readv_blocks_degraded(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	void *buf, *buf_md;
	struct iovec src;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(stripe_req->raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		buf = io_info->reference_parity;
		buf_md = io_info->reference_md_parity;
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		buf = io_info->degraded_buf +
		      data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		buf_md = io_info->degraded_md_buf +
			 data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.md_len;
	}

	buf += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.blocklen;
	buf_md += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.md_len;

	src.iov_base = buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					       cb_arg);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			    uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					       num_blocks, cb, cb_arg);
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md_buf,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct iovec src;

	if (cb == raid5f_chunk_complete_bdev_io) {
		return spdk_bdev_readv_blocks_degraded(desc, ch, iov, iovcnt, md_buf, offset_blocks,
						       num_blocks, cb, cb_arg);
	}

	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(raid_io);

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);

	src.iov_base = test_raid_bdev_io->buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, test_raid_bdev_io->buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					      cb_arg);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			   uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					      num_blocks, cb, cb_arg);
}

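/* Reference byte-wise XOR used to compute the expected parity. */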
static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

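/*
 * Submit a full-stripe write and process its completions. In degraded mode
 * the chunk targeting the missing base bdev is never written, so its expected
 * contents are copied into the destination buffer by hand before the caller's
 * comparison; the parity buffer is still checked against the reference parity.
 */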
static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	poll_threads();

	process_io_completions(io_info);

	if (g_test_degraded) {
		struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
		uint8_t p_idx;
		uint8_t i;
		off_t offset;
		uint32_t strip_len;

		for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
			if (io_info->raid_ch->base_channel[i] == NULL) {
				break;
			}
		}

		SPDK_CU_ASSERT_FATAL(i != raid_bdev->num_base_bdevs);

		p_idx = raid5f_stripe_parity_chunk_index(raid_bdev, io_info->stripe_index);

		if (i == p_idx) {
			return;
		}

		if (i >= p_idx) {
			i--;
		}

		strip_len = raid_bdev->strip_size_kb * 1024;
		offset = i * strip_len;

		memcpy(io_info->dest_buf + offset, io_info->src_buf + offset, strip_len);
		if (io_info->dest_md_buf) {
			strip_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;
			offset = i * strip_len;
			memcpy(io_info->dest_md_buf + offset, io_info->src_md_buf + offset, strip_len);
		}
	}

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		if (io_info->parity_buf) {
			CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
					 io_info->parity_buf_size) == 0);
		}
		if (io_info->parity_md_buf) {
			CU_ASSERT(memcmp(io_info->parity_md_buf, io_info->reference_md_parity,
					 io_info->parity_md_buf_size) == 0);
		}
	}
}

static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks <= io_info->r5f_info->raid_bdev->strip_size);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);

	if (g_test_degraded) {
		/* for the reconstruct read xor callback */
		poll_threads();
	}
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	free(io_info->src_buf);
	free(io_info->dest_buf);
	free(io_info->src_md_buf);
	free(io_info->dest_md_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
	free(io_info->parity_md_buf);
	free(io_info->reference_md_parity);
	free(io_info->degraded_buf);
	free(io_info->degraded_md_buf);
}

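/*
 * Allocate and initialize the test I/O buffers. The source data and metadata
 * are filled with recognizable patterns (block index in the first 8 bytes of
 * each block, incrementing bytes in the metadata) so that misplaced data is
 * caught by the final comparison.
 */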
static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t stripe_index, uint64_t stripe_offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	void *src_md_buf, *dest_md_buf;
	size_t buf_size = num_blocks * blocklen;
	size_t buf_md_size = num_blocks * raid_bdev->bdev.md_len;
	uint64_t block;
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(stripe_offset_blocks < r5f_info->stripe_blocks);

	memset(io_info, 0, sizeof(*io_info));

	if (buf_size) {
		src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_buf != NULL);

		dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

		memset(src_buf, 0xff, buf_size);
		for (block = 0; block < num_blocks; block++) {
			*((uint64_t *)(src_buf + block * blocklen)) = block;
		}
	} else {
		src_buf = NULL;
		dest_buf = NULL;
	}

	if (buf_md_size) {
		src_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_md_buf != NULL);

		dest_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_md_buf != NULL);

		memset(src_md_buf, 0xff, buf_md_size);
		for (i = 0; i < buf_md_size; i++) {
			*((uint8_t *)(src_md_buf + i)) = (uint8_t)i;
		}
	} else {
		src_md_buf = NULL;
		dest_md_buf = NULL;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->stripe_index = stripe_index;
	io_info->offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	io_info->stripe_offset_blocks = stripe_offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->src_md_buf = src_md_buf;
	io_info->dest_md_buf = dest_md_buf;
	io_info->buf_size = buf_size;
	io_info->buf_md_size = buf_md_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

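/* Compute the reference parity (and metadata parity) by XORing the data chunks. */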
static void
io_info_setup_parity(struct raid_io_info *io_info, void *src, void *src_md)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	unsigned i;

	io_info->parity_buf_size = strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		xor_block(io_info->reference_parity, src, strip_len);
		src += strip_len;
	}

	if (src_md) {
		size_t strip_md_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;

		io_info->parity_md_buf_size = strip_md_len;
		io_info->parity_md_buf = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->parity_md_buf != NULL);

		io_info->reference_md_parity = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->reference_md_parity != NULL);

		for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
			xor_block(io_info->reference_md_parity, src_md, strip_md_len);
			src_md += strip_md_len;
		}
	}
}

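/*
 * Prepare the simulated contents of the surviving base bdevs for a degraded
 * read: the full stripe is filled with a fixed pattern, the region under test
 * gets the source data, parity is computed over that stripe, and the region
 * under test is then clobbered so the data can only come back correct via
 * reconstruction from the other chunks and parity.
 */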
static void
io_info_setup_degraded(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	uint32_t md_len = raid_bdev->bdev.md_len;
	size_t stripe_len = r5f_info->stripe_blocks * blocklen;
	size_t stripe_md_len = r5f_info->stripe_blocks * md_len;

	io_info->degraded_buf = malloc(stripe_len);
	SPDK_CU_ASSERT_FATAL(io_info->degraded_buf != NULL);

	memset(io_info->degraded_buf, 0xab, stripe_len);

	memcpy(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       io_info->src_buf, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		io_info->degraded_md_buf = malloc(stripe_md_len);
		SPDK_CU_ASSERT_FATAL(io_info->degraded_md_buf != NULL);

		memset(io_info->degraded_md_buf, 0xab, stripe_md_len);

		memcpy(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       io_info->src_md_buf, io_info->num_blocks * md_len);
	}

	io_info_setup_parity(io_info, io_info->degraded_buf, io_info->degraded_md_buf);

	memset(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       0xcd, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		memset(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       0xcd, io_info->num_blocks * md_len);
	}
}

static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	struct raid_io_info io_info;

	init_io_info(&io_info, r5f_info, raid_ch, io_type, stripe_index, stripe_offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (g_test_degraded) {
			io_info_setup_degraded(&io_info);
		}
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info, io_info.src_buf, io_info.src_md_buf);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);
	if (io_info.buf_md_size) {
		CU_ASSERT(memcmp(io_info.src_md_buf, io_info.dest_md_buf, io_info.buf_md_size) == 0);
	}

	deinit_io_info(&io_info);
}

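/*
 * Run test_fn once for every raid configuration created in test_suite_init().
 * In degraded mode the first base bdev's channel is left NULL to simulate a
 * missing base bdev.
 */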
static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			   struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel raid_ch = { 0 };
		int i;

		r5f_info = create_raid5f(params);

		raid_ch.num_channels = params->num_base_bdevs;
		raid_ch.base_channel = calloc(params->num_base_bdevs, sizeof(struct spdk_io_channel *));
		SPDK_CU_ASSERT_FATAL(raid_ch.base_channel != NULL);

		for (i = 0; i < params->num_base_bdevs; i++) {
			if (g_test_degraded && i == 0) {
				continue;
			}
			raid_ch.base_channel[i] = (void *)1;
		}

		raid_ch.module_channel = raid5f_get_io_channel(r5f_info->raid_bdev);
		SPDK_CU_ASSERT_FATAL(raid_ch.module_channel);

		test_fn(r5f_info->raid_bdev, &raid_ch);

		spdk_put_io_channel(raid_ch.module_channel);
		poll_threads();

		free(raid_ch.base_channel);

		delete_raid5f(r5f_info);
	}
}

#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	uint64_t stripe_index;
	unsigned int i;

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		uint64_t stripe_offset = i * strip_size;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, 1);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, strip_size);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + strip_size - 1, 1);
			if (strip_size <= 2) {
				continue;
			}
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + 1, strip_size - 2);
		}
	}
}
static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

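/*
 * Verify that a write request's iovecs, chosen with lengths that straddle
 * chunk boundaries, are split correctly across the stripe's data chunks.
 */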
static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid5f_io_channel *r5ch = spdk_io_channel_get_ctx(raid_ch->module_channel);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_io_info io_info;
	struct raid_bdev_io *raid_io;
	struct spdk_bdev_io *bdev_io;
	struct stripe_request *stripe_req;
	struct chunk *chunk;
	struct iovec *iovs_bak;
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE, 0, 0, 0);

	raid_io = get_raid_io(&io_info);
	bdev_io = spdk_bdev_io_from_ctx(raid_io);
	iovs_bak = bdev_io->u.bdev.iovs;
	bdev_io->u.bdev.iovs = iovs;
	bdev_io->u.bdev.iovcnt = iovcnt;

	stripe_req = raid5f_stripe_request_alloc(r5ch, STRIPE_REQ_WRITE);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	bdev_io->u.bdev.iovs = iovs_bak;
	raid5f_stripe_request_free(stripe_req);
	spdk_bdev_free_io(bdev_io);
	deinit_io_info(&io_info);
}
static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}
static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

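/*
 * Inject each error type into each base bdev in turn during a full-stripe
 * write. ENOMEM is expected to be retried to success; submit and completion
 * errors must fail the raid I/O.
 */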
static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->desc->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}
static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

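/*
 * ENOMEM-then-error case: while handling the ENOMEM retry for one base bdev,
 * a second error is injected on the last base bdev via the on_enomem_cb hook,
 * so the retried request must ultimately fail.
 */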
1108 
1109 struct chunk_write_error_with_enomem_ctx {
1110 	enum test_bdev_error_type error_type;
1111 	struct spdk_bdev *bdev;
1112 };
1113 
1114 static void
1115 chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
1116 {
1117 	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;
1118 
1119 	io_info->error.type = ctx->error_type;
1120 	io_info->error.bdev = ctx->bdev;
1121 }
1122 
1123 static void
1124 __test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
1125 		struct raid_bdev_io_channel *raid_ch)
1126 {
1127 	struct raid5f_info *r5f_info = raid_bdev->module_private;
1128 	struct raid_base_bdev_info *base_bdev_info;
1129 	uint64_t stripe_index;
1130 	struct raid_io_info io_info;
1131 	enum test_bdev_error_type error_type;
1132 	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;
1133 
1134 	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
1135 		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
1136 			struct raid_base_bdev_info *base_bdev_info_last =
1137 					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];
1138 
1139 			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
1140 				if (base_bdev_info == base_bdev_info_last) {
1141 					continue;
1142 				}
1143 
1144 				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
1145 					     stripe_index, 0, r5f_info->stripe_blocks);
1146 
1147 				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
1148 				io_info.error.bdev = base_bdev_info->desc->bdev;
1149 				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
1150 				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
1151 				on_enomem_cb_ctx.error_type = error_type;
1152 				on_enomem_cb_ctx.bdev = base_bdev_info_last->desc->bdev;
1153 
1154 				test_raid5f_write_request(&io_info);
1155 
1156 				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
1157 
1158 				deinit_io_info(&io_info);
1159 			}
1160 		}
1161 	}
1162 }
1163 static void
1164 test_raid5f_chunk_write_error_with_enomem(void)
1165 {
1166 	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
1167 }
1168 
1169 static void
1170 test_raid5f_submit_full_stripe_write_request_degraded(void)
1171 {
1172 	g_test_degraded = true;
1173 	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
1174 }
1175 
1176 static void
1177 test_raid5f_submit_read_request_degraded(void)
1178 {
1179 	g_test_degraded = true;
1180 	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
1181 }
1182 
1183 int
1184 main(int argc, char **argv)
1185 {
1186 	CU_pSuite suite = NULL;
1187 	unsigned int num_failures;
1188 
1189 	CU_initialize_registry();
1190 
1191 	suite = CU_add_suite_with_setup_and_teardown("raid5f", test_suite_init, test_suite_cleanup,
1192 			test_setup, NULL);
1193 	CU_ADD_TEST(suite, test_raid5f_start);
1194 	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
1195 	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
1196 	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
1197 	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
1198 	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);
1199 	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request_degraded);
1200 	CU_ADD_TEST(suite, test_raid5f_submit_read_request_degraded);
1201 
1202 	allocate_threads(1);
1203 	set_thread(0);
1204 
1205 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1206 	CU_cleanup_registry();
1207 
1208 	free_threads();
1209 
1210 	return num_failures;
1211 }
1212