/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk/xor.h"

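/*
 * The sources below are included as .c files so the tests can exercise the
 * static functions and internal structures of the raid5f module directly.
 */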
#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid5f.c"
#include "../common.c"

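/*
 * g_accel_p is a fake io_device handle standing in for the accel framework;
 * g_test_degraded switches the tests into degraded (missing base bdev) mode.
 */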
static void *g_accel_p = (void *)0xdeadbeaf;
static bool g_test_degraded;

DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB_V(raid_bdev_process_request_complete, (struct raid_bdev_process_request *process_req,
		int status));
DEFINE_STUB_V(raid_bdev_io_init, (struct raid_bdev_io *raid_io,
				  struct raid_bdev_io_channel *raid_ch,
				  enum spdk_bdev_io_type type, uint64_t offset_blocks,
				  uint64_t num_blocks, struct iovec *iovs, int iovcnt, void *md_buf,
				  struct spdk_memory_domain *memory_domain, void *memory_domain_ctx));

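/* Route accel channel lookups to the fake io_device registered in init_accel(). */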
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

struct xor_ctx {
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;
};

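/*
 * Mock of the accel XOR API: the parity is generated synchronously with
 * spdk_xor_gen(), but the completion callback is deferred via a thread
 * message to mimic the asynchronous behavior of the real accel framework.
 */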
static void
finish_xor(void *_ctx)
{
	struct xor_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, 0);

	free(ctx);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct xor_ctx *ctx;

	ctx = malloc(sizeof(*ctx));
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	SPDK_CU_ASSERT_FATAL(spdk_xor_gen(dst, sources, nsrcs, nbytes) == 0);

	spdk_thread_send_msg(spdk_get_thread(), finish_xor, ctx);

	return 0;
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

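/*
 * Build the test parameter set as the cartesian product of the value arrays
 * below, skipping combinations where the strip size is zero or larger than
 * the base bdev.
 */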
static int
test_suite_init(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	enum raid_params_md_type md_type_values[] = { RAID_PARAMS_MD_NONE, RAID_PARAMS_MD_SEPARATE, RAID_PARAMS_MD_INTERLEAVED };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	enum raid_params_md_type *md_type;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values) *
		       SPDK_COUNTOF(md_type_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					ARRAY_FOR_EACH(md_type_values, md_type) {
						struct raid_params params = {
							.num_base_bdevs = *num_base_bdevs,
							.base_bdev_blockcnt = *base_bdev_blockcnt,
							.base_bdev_blocklen = *base_bdev_blocklen,
							.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen,
							.md_type = *md_type,
						};
						if (params.strip_size == 0 ||
						    params.strip_size > params.base_bdev_blockcnt) {
							continue;
						}
						raid_test_params_add(&params);
					}
				}
			}
		}
	}

	init_accel();

	return 0;
}

static int
test_suite_cleanup(void)
{
	fini_accel();
	raid_test_params_free();
	return 0;
}

static void
test_setup(void)
{
	g_test_degraded = false;
}

static struct raid5f_info *
create_raid5f(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid5f_module);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

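/*
 * Verify the geometry calculated by raid5f_start(): stripe size, total
 * stripe count, exposed block count, and write/boundary parameters.
 */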
static void
test_raid5f_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		SPDK_CU_ASSERT_FATAL(r5f_info != NULL);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t stripe_index;
	uint64_t offset_blocks;
	uint64_t stripe_offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	void *src_md_buf;
	void *dest_md_buf;
	size_t buf_size;
	size_t buf_md_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	void *parity_md_buf;
	void *reference_md_parity;
	size_t parity_md_buf_size;
	void *degraded_buf;
	void *degraded_md_buf;
	enum spdk_bdev_io_status status;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

struct test_raid_bdev_io {
	struct raid_bdev_io raid_io;
	struct raid_io_info *io_info;
	void *buf;
	void *buf_md;
};

void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct raid_io_info *io_info = test_raid_bdev_io->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

void
raid_test_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);

	test_raid_bdev_io->io_info->status = status;

	free(raid_io->iovs);
	free(test_raid_bdev_io);
}

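/*
 * Allocate a raid_bdev_io for the test and split its buffer into a fixed
 * number of iovecs of equal length (any remainder goes to the last one)
 * to exercise the iovec mapping paths.
 */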
static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct iovec *iovs;
	int iovcnt;
	void *md_buf;
	size_t iov_len, remaining;
	struct iovec *iov;
	void *buf;
	int i;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = io_info->src_buf;
		test_raid_bdev_io->buf_md = io_info->src_md_buf;
		buf = io_info->dest_buf;
		md_buf = io_info->dest_md_buf;
	} else {
		test_raid_bdev_io->buf = io_info->dest_buf;
		test_raid_bdev_io->buf_md = io_info->dest_md_buf;
		buf = io_info->src_buf;
		md_buf = io_info->src_md_buf;
	}

	iovcnt = 7;
	iovs = calloc(iovcnt, sizeof(*iovs));
	SPDK_CU_ASSERT_FATAL(iovs != NULL);

	remaining = io_info->num_blocks * blocklen;
	iov_len = remaining / iovcnt;

	for (i = 0; i < iovcnt; i++) {
		iov = &iovs[i];
		iov->iov_base = buf;
		iov->iov_len = iov_len;
		buf += iov_len;
		remaining -= iov_len;
	}
	iov->iov_len += remaining;

	raid_io = &test_raid_bdev_io->raid_io;

	raid_test_bdev_io_init(raid_io, raid_bdev, io_info->raid_ch, io_info->io_type,
			       io_info->offset_blocks, io_info->num_blocks, iovs, iovcnt, md_buf);

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

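/*
 * Fake submission path: inject SUBMIT/NOMEM errors for the selected bdev,
 * otherwise queue the I/O for later completion in process_io_completions().
 */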
static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

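/*
 * Complete all queued I/O, failing those targeted by a COMPLETE error.
 * After an ENOMEM injection, clear the error, retry the queued waiters and
 * recurse to drain the I/O they resubmit.
 */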
static void
process_io_completions(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	bool success;

	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);

		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
		    io_info->error.bdev == bdev_io->bdev) {
			success = false;
		} else {
			success = true;
		}

		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
	}

	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
		struct spdk_bdev *enomem_bdev = io_info->error.bdev;

		io_info->error.type = TEST_BDEV_ERROR_NONE;

		if (io_info->error.on_enomem_cb != NULL) {
			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
		}

		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
			waitq_entry->cb_fn(waitq_entry->cb_arg);
		}

		process_io_completions(io_info);
	} else {
		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
	}
}

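/*
 * Write stub: copy the chunk's data into the test's destination buffer,
 * or capture the parity chunk into parity_buf (when set), so the result
 * can be compared against the reference after the request completes.
 */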
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid5f_info *r5f_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	uint64_t data_offset;
	struct iovec dest;
	void *dest_md_buf;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	r5f_info = io_info->r5f_info;
	raid_bdev = r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		dest.iov_base = io_info->parity_buf;
		if (md_buf != NULL) {
			dest_md_buf = io_info->parity_md_buf;
		}
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		data_offset = data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		dest.iov_base = test_raid_bdev_io->buf + data_offset;
		if (md_buf != NULL) {
			data_offset = (data_offset >> r5f_info->blocklen_shift) * raid_bdev->bdev.md_len;
			dest_md_buf = test_raid_bdev_io->buf_md + data_offset;
		}
	}
	dest.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(iov, iovcnt, &dest, 1);
	if (md_buf != NULL) {
		memcpy(dest_md_buf, md_buf, num_blocks * raid_bdev->bdev.md_len);
	}

submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

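/*
 * Degraded read stub: serve chunk reads from the prepared degraded_buf
 * (data chunks) or from the reference parity, as the reconstruct-read path
 * would see them on the surviving base bdevs.
 */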
static int
spdk_bdev_readv_blocks_degraded(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	void *buf, *buf_md;
	struct iovec src;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = SPDK_CONTAINEROF(stripe_req->raid_io, struct test_raid_bdev_io, raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		buf = io_info->reference_parity;
		buf_md = io_info->reference_md_parity;
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		buf = io_info->degraded_buf +
		      data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		buf_md = io_info->degraded_md_buf +
			 data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.md_len;
	}

	buf += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.blocklen;
	buf_md += (offset_blocks % raid_bdev->strip_size) * raid_bdev->bdev.md_len;

	src.iov_base = buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					       cb_arg);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			    uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					       num_blocks, cb, cb_arg);
}

int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md_buf,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct test_raid_bdev_io *test_raid_bdev_io = SPDK_CONTAINEROF(raid_io, struct test_raid_bdev_io,
			raid_io);
	struct iovec src;

	if (cb == raid5f_chunk_complete_bdev_io) {
		return spdk_bdev_readv_blocks_degraded(desc, ch, iov, iovcnt, md_buf, offset_blocks,
						       num_blocks, cb, cb_arg);
	}

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);

	src.iov_base = test_raid_bdev_io->buf;
	src.iov_len = num_blocks * raid_bdev->bdev.blocklen;

	spdk_iovcpy(&src, 1, iov, iovcnt);
	if (md_buf != NULL) {
		memcpy(md_buf, test_raid_bdev_io->buf_md, num_blocks * raid_bdev->bdev.md_len);
	}

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					      cb_arg);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			   uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					      num_blocks, cb, cb_arg);
}

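/* XOR 'size' bytes of buffer 'b' into buffer 'a' in place. */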
static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

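/*
 * Submit a single full-stripe write. In degraded mode the chunk on the
 * missing base bdev is never written, so its data is copied to the
 * destination buffer by hand to make the final memcmp() meaningful; if the
 * missing device holds the parity chunk, the parity checks are skipped.
 */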
static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	poll_threads();

	process_io_completions(io_info);

	if (g_test_degraded) {
		struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
		uint8_t p_idx;
		uint8_t i;
		off_t offset;
		uint32_t strip_len;

		for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
			if (!raid_bdev_channel_get_base_channel(io_info->raid_ch, i)) {
				break;
			}
		}

		SPDK_CU_ASSERT_FATAL(i != raid_bdev->num_base_bdevs);

		p_idx = raid5f_stripe_parity_chunk_index(raid_bdev, io_info->stripe_index);

		if (i == p_idx) {
			return;
		}

		if (i >= p_idx) {
			i--;
		}

		strip_len = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		offset = i * strip_len;

		memcpy(io_info->dest_buf + offset, io_info->src_buf + offset, strip_len);
		if (io_info->dest_md_buf) {
			strip_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;
			offset = i * strip_len;
			memcpy(io_info->dest_md_buf + offset, io_info->src_md_buf + offset, strip_len);
		}
	}

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		if (io_info->parity_buf) {
			CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
					 io_info->parity_buf_size) == 0);
		}
		if (io_info->parity_md_buf) {
			CU_ASSERT(memcmp(io_info->parity_md_buf, io_info->reference_md_parity,
					 io_info->parity_md_buf_size) == 0);
		}
	}
}

static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks <= io_info->r5f_info->raid_bdev->strip_size);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);

	if (g_test_degraded) {
		/* Poll for the reconstruct read's XOR completion callback. */
		poll_threads();
	}
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	free(io_info->src_buf);
	free(io_info->dest_buf);
	free(io_info->src_md_buf);
	free(io_info->dest_md_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
	free(io_info->parity_md_buf);
	free(io_info->reference_md_parity);
	free(io_info->degraded_buf);
	free(io_info->degraded_md_buf);
}

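/*
 * Set up the source/destination data and metadata buffers for one request.
 * The source is filled with a recognizable pattern (the block index in the
 * first bytes of each block) so data misplacement shows up in memcmp().
 */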
static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t stripe_index, uint64_t stripe_offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	void *src_md_buf, *dest_md_buf;
	size_t buf_size = num_blocks * blocklen;
	size_t buf_md_size = raid_bdev->bdev.md_interleave ? 0 : num_blocks * raid_bdev->bdev.md_len;
	uint64_t block;
	uint64_t i;

	SPDK_CU_ASSERT_FATAL(stripe_offset_blocks < r5f_info->stripe_blocks);

	memset(io_info, 0, sizeof(*io_info));

	if (buf_size) {
		src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_buf != NULL);

		dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

		memset(src_buf, 0xff, buf_size);
		for (block = 0; block < num_blocks; block++) {
			*((uint64_t *)(src_buf + block * blocklen)) = block;
		}
	} else {
		src_buf = NULL;
		dest_buf = NULL;
	}

	if (buf_md_size) {
		src_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_md_buf != NULL);

		dest_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_md_buf != NULL);

		memset(src_md_buf, 0xff, buf_md_size);
		for (i = 0; i < buf_md_size; i++) {
			*((uint8_t *)(src_md_buf + i)) = (uint8_t)i;
		}
	} else {
		src_md_buf = NULL;
		dest_md_buf = NULL;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->stripe_index = stripe_index;
	io_info->offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	io_info->stripe_offset_blocks = stripe_offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->src_md_buf = src_md_buf;
	io_info->dest_md_buf = dest_md_buf;
	io_info->buf_size = buf_size;
	io_info->buf_md_size = buf_md_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

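/*
 * Compute the expected parity for the stripe by XORing the data chunks in
 * 'src' (and 'src_md', if present) into freshly zeroed reference buffers.
 */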
static void
io_info_setup_parity(struct raid_io_info *io_info, void *src, void *src_md)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	unsigned i;

	io_info->parity_buf_size = strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		xor_block(io_info->reference_parity, src, strip_len);
		src += strip_len;
	}

	if (src_md) {
		size_t strip_md_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;

		SPDK_CU_ASSERT_FATAL(raid_bdev->bdev.md_interleave == 0);

		io_info->parity_md_buf_size = strip_md_len;
		io_info->parity_md_buf = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->parity_md_buf != NULL);

		io_info->reference_md_parity = calloc(1, io_info->parity_md_buf_size);
		SPDK_CU_ASSERT_FATAL(io_info->reference_md_parity != NULL);

		for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
			xor_block(io_info->reference_md_parity, src_md, strip_md_len);
			src_md += strip_md_len;
		}
	}
}

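/*
 * Prepare the stripe contents seen by the surviving bdevs in degraded mode:
 * fill the stripe with filler, overlay the request's data, derive the
 * parity from it, then clobber the request's range so a correct result can
 * only come from parity reconstruction.
 */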
static void
io_info_setup_degraded(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	uint32_t md_len = raid_bdev->bdev.md_interleave ? 0 : raid_bdev->bdev.md_len;
	size_t stripe_len = r5f_info->stripe_blocks * blocklen;
	size_t stripe_md_len = r5f_info->stripe_blocks * md_len;

	io_info->degraded_buf = malloc(stripe_len);
	SPDK_CU_ASSERT_FATAL(io_info->degraded_buf != NULL);

	memset(io_info->degraded_buf, 0xab, stripe_len);

	memcpy(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       io_info->src_buf, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		io_info->degraded_md_buf = malloc(stripe_md_len);
		SPDK_CU_ASSERT_FATAL(io_info->degraded_md_buf != NULL);

		memset(io_info->degraded_md_buf, 0xab, stripe_md_len);

		memcpy(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       io_info->src_md_buf, io_info->num_blocks * md_len);
	}

	io_info_setup_parity(io_info, io_info->degraded_buf, io_info->degraded_md_buf);

	memset(io_info->degraded_buf + io_info->stripe_offset_blocks * blocklen,
	       0xcd, io_info->num_blocks * blocklen);

	if (stripe_md_len != 0) {
		memset(io_info->degraded_md_buf + io_info->stripe_offset_blocks * md_len,
		       0xcd, io_info->num_blocks * md_len);
	}
}

static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	struct raid_io_info io_info;

	init_io_info(&io_info, r5f_info, raid_ch, io_type, stripe_index, stripe_offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (g_test_degraded) {
			io_info_setup_degraded(&io_info);
		}
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info, io_info.src_buf, io_info.src_md_buf);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);
	if (io_info.buf_md_size) {
		CU_ASSERT(memcmp(io_info.src_md_buf, io_info.dest_md_buf, io_info.buf_md_size) == 0);
	}

	deinit_io_info(&io_info);
}

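/*
 * Run 'test_fn' against every parameter combination. In degraded mode the
 * first base bdev's channel is removed to simulate a missing device.
 */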
static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			   struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel *raid_ch;

		r5f_info = create_raid5f(params);
		raid_ch = raid_test_create_io_channel(r5f_info->raid_bdev);

		if (g_test_degraded) {
			raid_ch->_base_channels[0] = NULL;
		}

		test_fn(r5f_info->raid_bdev, raid_ch);

		raid_test_destroy_io_channel(raid_ch);
		delete_raid5f(r5f_info);
	}
}

#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

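/*
 * Exercise single-block, full-strip, strip-end and strip-interior reads
 * for every data chunk position of each tested stripe.
 */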
static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	uint64_t stripe_index;
	unsigned int i;

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		uint64_t stripe_offset = i * strip_size;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, 1);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, strip_size);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + strip_size - 1, 1);
			if (strip_size <= 2) {
				continue;
			}
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + 1, strip_size - 2);
		}
	}
}

static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

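/*
 * Verify that raid5f_stripe_request_map_iovecs() splits a write's iovecs
 * across the stripe's data chunks at the expected offsets and lengths.
 * The iovec base addresses are arbitrary and never dereferenced.
 */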
static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_io_channel *r5ch = raid_bdev_channel_get_module_ctx(raid_ch);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_bdev_io raid_io = {};
	struct stripe_request *stripe_req;
	struct chunk *chunk;
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	raid_io.raid_bdev = raid_bdev;
	raid_io.iovs = iovs;
	raid_io.iovcnt = iovcnt;

	stripe_req = raid5f_stripe_request_alloc(r5ch, STRIPE_REQ_WRITE);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = &raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	raid5f_stripe_request_free(stripe_req);
}

static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}

static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

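/*
 * Inject each error type into each base bdev in turn during full-stripe
 * writes. ENOMEM is expected to be retried and eventually succeed; the
 * other error types must fail the request.
 */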
static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->desc->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

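/*
 * While one base bdev is retrying after ENOMEM, inject a second error on
 * the last base bdev via the on_enomem callback; the write must then fail.
 */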
struct chunk_write_error_with_enomem_ctx {
	enum test_bdev_error_type error_type;
	struct spdk_bdev *bdev;
};

static void
chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
{
	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;

	io_info->error.type = ctx->error_type;
	io_info->error.bdev = ctx->bdev;
}

static void
__test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;
	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			struct raid_base_bdev_info *base_bdev_info_last =
					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];

			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				if (base_bdev_info == base_bdev_info_last) {
					continue;
				}

				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index, 0, r5f_info->stripe_blocks);

				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
				io_info.error.bdev = base_bdev_info->desc->bdev;
				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
				on_enomem_cb_ctx.error_type = error_type;
				on_enomem_cb_ctx.bdev = base_bdev_info_last->desc->bdev;

				test_raid5f_write_request(&io_info);

				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error_with_enomem(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
}

static void
test_raid5f_submit_full_stripe_write_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
test_raid5f_submit_read_request_degraded(void)
{
	g_test_degraded = true;
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite_with_setup_and_teardown("raid5f", test_suite_init, test_suite_cleanup,
			test_setup, NULL);
	CU_ADD_TEST(suite, test_raid5f_start);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request_degraded);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request_degraded);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}