/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "spdk/xor.h"

#include "common/lib/ut_multithread.c"

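/*
 * Include the implementation under test and the shared raid test helpers
 * directly, so the tests can exercise static functions such as
 * raid5f_submit_rw_request() and raid5f_stripe_request_alloc() without
 * exposing them through headers.
 */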
#include "bdev/raid/raid5f.c"
#include "../common.c"

static void *g_accel_p = (void *)0xdeadbeef;

DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

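/*
 * Stand-in for the accel framework: return a channel to the fake io_device
 * registered on g_accel_p in init_accel().
 */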
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void *
spdk_bdev_io_get_md_buf(struct spdk_bdev_io *bdev_io)
{
	return bdev_io->u.bdev.md_buf;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

struct xor_ctx {
	spdk_accel_completion_cb cb_fn;
	void *cb_arg;
};

static void
finish_xor(void *_ctx)
{
	struct xor_ctx *ctx = _ctx;

	ctx->cb_fn(ctx->cb_arg, 0);

	free(ctx);
}

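/*
 * Stubbed accel XOR: the parity is generated synchronously with
 * spdk_xor_gen(), but the completion callback is deferred via a thread
 * message so the stub keeps the asynchronous behavior of the real accel
 * framework; callers must poll the thread for the completion to fire.
 */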
int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct xor_ctx *ctx;

	ctx = malloc(sizeof(*ctx));
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	SPDK_CU_ASSERT_FATAL(spdk_xor_gen(dst, sources, nsrcs, nbytes) == 0);

	spdk_thread_send_msg(spdk_get_thread(), finish_xor, ctx);

	return 0;
}

void
raid_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);

	if (bdev_io->internal.cb) {
		bdev_io->internal.cb(bdev_io, status == SPDK_BDEV_IO_STATUS_SUCCESS, bdev_io->internal.caller_ctx);
	}
}

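/*
 * Partial completion accounting, mirroring the raid bdev core: each base
 * bdev I/O subtracts its block count from base_bdev_io_remaining, a failure
 * status sticks, and the raid I/O completes once the count reaches zero.
 */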
bool
raid_bdev_io_complete_part(struct raid_bdev_io *raid_io, uint64_t completed,
			   enum spdk_bdev_io_status status)
{
	assert(raid_io->base_bdev_io_remaining >= completed);
	raid_io->base_bdev_io_remaining -= completed;

	if (status != SPDK_BDEV_IO_STATUS_SUCCESS) {
		raid_io->base_bdev_io_status = status;
	}

	if (raid_io->base_bdev_io_remaining == 0) {
		raid_bdev_io_complete(raid_io, raid_io->base_bdev_io_status);
		return true;
	} else {
		return false;
	}
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

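/*
 * Build the global parameter matrix: every combination of base bdev count,
 * block count, block length, strip size and metadata length, skipping
 * combinations where the strip size rounds down to zero blocks or exceeds
 * the base bdev's block count.
 */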
static int
test_setup(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint32_t md_len_values[] = { 0, 64 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	uint32_t *md_len;
	struct raid_params params;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values) *
		       SPDK_COUNTOF(md_len_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					ARRAY_FOR_EACH(md_len_values, md_len) {
						params.num_base_bdevs = *num_base_bdevs;
						params.base_bdev_blockcnt = *base_bdev_blockcnt;
						params.base_bdev_blocklen = *base_bdev_blocklen;
						params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
						params.md_len = *md_len;
						if (params.strip_size == 0 ||
						    params.strip_size > *base_bdev_blockcnt) {
							continue;
						}
						raid_test_params_add(&params);
					}
				}
			}
		}
	}

	init_accel();

	return 0;
}

static int
test_cleanup(void)
{
	fini_accel();
	raid_test_params_free();
	return 0;
}

static struct raid5f_info *
create_raid5f(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid5f_module);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

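/*
 * Verify the geometry derived in raid5f_start(). With one parity chunk per
 * stripe, n base bdevs expose n - 1 data chunks: e.g. 4 base bdevs with
 * strip_size 32 give stripe_blocks = 32 * 3 = 96. Any partial trailing
 * stripe on the base bdevs is excluded from the exposed block count.
 */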
static void
test_raid5f_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		SPDK_CU_ASSERT_FATAL(r5f_info != NULL);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

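/*
 * Per-I/O test context: source/destination data and metadata buffers, the
 * expected parity for writes, queues of simulated base bdev I/Os, and an
 * optional error injection target.
 */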
struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	void *src_md_buf;
	void *dest_md_buf;
	size_t buf_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	void *parity_md_buf;
	void *reference_md_parity;
	size_t parity_md_buf_size;
	enum spdk_bdev_io_status status;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

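/*
 * Mimics the bdev layer's allocation scheme: the raid_bdev_io driver context
 * is laid out immediately after the spdk_bdev_io, which get_raid_io()
 * verifies with the bdev_io_buf == (char *)test_raid_bdev_io assertion.
 */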
struct test_raid_bdev_io {
	char bdev_io_buf[sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)];
	struct raid_io_info *io_info;
	void *buf;
	void *buf_md;
};

void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct raid_io_info *io_info;

	io_info = ((struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(raid_io))->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

static void
raid_bdev_io_completion_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_io_info *io_info = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		io_info->status = SPDK_BDEV_IO_STATUS_FAILED;
	} else {
		io_info->status = SPDK_BDEV_IO_STATUS_SUCCESS;
	}
}

static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io->bdev_io_buf == (char *)test_raid_bdev_io);
	bdev_io = (struct spdk_bdev_io *)test_raid_bdev_io->bdev_io_buf;
	bdev_io->bdev = &raid_bdev->bdev;
	bdev_io->type = io_info->io_type;
	bdev_io->u.bdev.offset_blocks = io_info->offset_blocks;
	bdev_io->u.bdev.num_blocks = io_info->num_blocks;
	bdev_io->internal.cb = raid_bdev_io_completion_cb;
	bdev_io->internal.caller_ctx = io_info;

	raid_io = (void *)bdev_io->driver_ctx;
	raid_io->raid_bdev = raid_bdev;
	raid_io->raid_ch = io_info->raid_ch;
	raid_io->base_bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = io_info->src_buf;
		test_raid_bdev_io->buf_md = io_info->src_md_buf;
		bdev_io->iov.iov_base = io_info->dest_buf;
		bdev_io->u.bdev.md_buf = io_info->dest_md_buf;
	} else {
		test_raid_bdev_io->buf = io_info->dest_buf;
		test_raid_bdev_io->buf_md = io_info->dest_md_buf;
		bdev_io->iov.iov_base = io_info->src_buf;
		bdev_io->u.bdev.md_buf = io_info->src_md_buf;
	}

	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->iov.iov_len = io_info->num_blocks * blocklen;

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

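/*
 * Simulated base bdev submission: injected SUBMIT/NOMEM errors fail the call
 * outright; otherwise the I/O is queued and completed later by
 * process_io_completions().
 */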
static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

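/*
 * Drain the queued base bdev I/Os, failing those aimed at the error-injected
 * bdev. For NOMEM, clear the injected error (optionally re-arming it via the
 * on_enomem_cb hook), fire the queued io_wait retries, then recurse to drain
 * the resubmitted I/Os.
 */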
static void
process_io_completions(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	bool success;

	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);

		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
		    io_info->error.bdev == bdev_io->bdev) {
			success = false;
		} else {
			success = true;
		}

		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
	}

	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
		struct spdk_bdev *enomem_bdev = io_info->error.bdev;

		io_info->error.type = TEST_BDEV_ERROR_NONE;

		if (io_info->error.on_enomem_cb != NULL) {
			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
		}

		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
			waitq_entry->cb_fn(waitq_entry->cb_arg);
		}

		process_io_completions(io_info);
	} else {
		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
	}
}

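/*
 * Translate a byte offset in a data buffer into the matching byte offset in
 * the metadata buffer: one md_len-sized entry per data block.
 */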
#define DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset) \
	(((data_offset) >> (raid_bdev)->blocklen_shift) * (raid_bdev)->bdev.md_len)

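/*
 * Write stub for the raid5f write path: data chunks are copied into the raid
 * I/O's destination buffer at the offset implied by the chunk index (with
 * the parity chunk's position accounted for), while the parity chunk is
 * captured in parity_buf for later comparison against the reference parity.
 */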
int
spdk_bdev_writev_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				struct iovec *iov, int iovcnt, void *md_buf,
				uint64_t offset_blocks, uint64_t num_blocks,
				spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint8_t data_chunk_idx;
	uint64_t data_offset;
	void *dest_buf, *dest_md_buf;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_complete_bdev_io);
	SPDK_CU_ASSERT_FATAL(iovcnt == 1);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(stripe_req->raid_io);
	io_info = test_raid_bdev_io->io_info;
	raid_bdev = io_info->r5f_info->raid_bdev;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		dest_buf = io_info->parity_buf;
		if (md_buf != NULL) {
			dest_md_buf = io_info->parity_md_buf;
		}
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		data_offset = data_chunk_idx * raid_bdev->strip_size * raid_bdev->bdev.blocklen;
		dest_buf = test_raid_bdev_io->buf + data_offset;
		if (md_buf != NULL) {
			data_offset = DATA_OFFSET_TO_MD_OFFSET(raid_bdev, data_offset);
			dest_md_buf = test_raid_bdev_io->buf_md + data_offset;
		}
	}

	memcpy(dest_buf, iov->iov_base, iov->iov_len);
	if (md_buf != NULL) {
		memcpy(dest_md_buf, md_buf, DATA_OFFSET_TO_MD_OFFSET(raid_bdev, iov->iov_len));
	}

submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					       cb_arg);
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			    uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_writev_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					       num_blocks, cb, cb_arg);
}

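/*
 * Read stub: emulate the base bdev read by copying the expected data (and
 * metadata) from the test's prepared buffer into the request's iovec.
 */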
int
spdk_bdev_readv_blocks_with_md(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			       struct iovec *iov, int iovcnt, void *md_buf,
			       uint64_t offset_blocks, uint64_t num_blocks,
			       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;
	struct test_raid_bdev_io *test_raid_bdev_io;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);
	SPDK_CU_ASSERT_FATAL(iovcnt == 1);

	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(raid_io);

	memcpy(iov->iov_base, test_raid_bdev_io->buf, iov->iov_len);
	if (md_buf != NULL) {
		memcpy(md_buf, test_raid_bdev_io->buf_md, DATA_OFFSET_TO_MD_OFFSET(raid_io->raid_bdev,
				iov->iov_len));
	}

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, NULL, offset_blocks, num_blocks, cb,
					      cb_arg);
}

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks,
			   uint64_t num_blocks, spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	CU_ASSERT_PTR_NULL(opts->memory_domain);
	CU_ASSERT_PTR_NULL(opts->memory_domain_ctx);

	return spdk_bdev_readv_blocks_with_md(desc, ch, iov, iovcnt, opts->metadata, offset_blocks,
					      num_blocks, cb, cb_arg);
}

static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

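/*
 * Submit a single full-stripe write and drain completions; poll_threads()
 * lets the deferred accel XOR completion run. On success, the captured
 * parity is compared against the reference computed in io_info_setup_parity().
 */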
static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	poll_threads();

	process_io_completions(io_info);

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS) {
		if (io_info->parity_buf) {
			CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
					 io_info->parity_buf_size) == 0);
		}
		if (io_info->parity_md_buf) {
			CU_ASSERT(memcmp(io_info->parity_md_buf, io_info->reference_md_parity,
					 io_info->parity_md_buf_size) == 0);
		}
	}
}

static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks <= io_info->r5f_info->raid_bdev->strip_size);

	raid_io = get_raid_io(io_info);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	free(io_info->src_buf);
	free(io_info->dest_buf);
	free(io_info->src_md_buf);
	free(io_info->dest_md_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
	free(io_info->parity_md_buf);
	free(io_info->reference_md_parity);
}

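/*
 * Allocate and pattern the data/metadata buffers for one test I/O: each data
 * block is tagged with its block index and each metadata byte with its byte
 * index, so any misplaced copy shows up in the final memcmp.
 */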
static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	void *src_md_buf, *dest_md_buf;
	size_t buf_size = num_blocks * blocklen;
	size_t buf_md_size = num_blocks * raid_bdev->bdev.md_len;
	uint64_t block;
	uint64_t i;

	memset(io_info, 0, sizeof(*io_info));

	if (buf_size) {
		src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_buf != NULL);

		dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

		memset(src_buf, 0xff, buf_size);
		for (block = 0; block < num_blocks; block++) {
			*((uint64_t *)(src_buf + block * blocklen)) = block;
		}
	} else {
		src_buf = NULL;
		dest_buf = NULL;
	}

	if (buf_md_size) {
		src_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(src_md_buf != NULL);

		dest_md_buf = spdk_dma_malloc(buf_md_size, 4096, NULL);
		SPDK_CU_ASSERT_FATAL(dest_md_buf != NULL);

		memset(src_md_buf, 0xff, buf_md_size);
		for (i = 0; i < buf_md_size; i++) {
			*((uint8_t *)(src_md_buf + i)) = (uint8_t)i;
		}
	} else {
		src_md_buf = NULL;
		dest_md_buf = NULL;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->offset_blocks = offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->src_md_buf = src_md_buf;
	io_info->dest_md_buf = dest_md_buf;
	io_info->buf_size = buf_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

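/*
 * Compute the reference parity (and metadata parity) by XOR-ing the data
 * strips of the source buffer, independently of the accel-based XOR used by
 * the code under test.
 */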
static void
io_info_setup_parity(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	size_t strip_md_len = raid_bdev->strip_size * raid_bdev->bdev.md_len;
	void *src = io_info->src_buf;
	unsigned i;

	io_info->parity_buf_size = strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		xor_block(io_info->reference_parity, src, strip_len);
		src += strip_len;
	}

	/* No metadata configured: skip md parity to avoid NULL src_md_buf arithmetic */
	if (raid_bdev->bdev.md_len == 0) {
		return;
	}

	io_info->parity_md_buf_size = strip_md_len;
	io_info->parity_md_buf = calloc(1, io_info->parity_md_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_md_buf != NULL);

	io_info->reference_md_parity = calloc(1, io_info->parity_md_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_md_parity != NULL);

	src = io_info->src_md_buf;
	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		xor_block(io_info->reference_md_parity, src, strip_md_len);
		src += strip_md_len;
	}
}

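/*
 * Common driver for a single read or write request: build the io_info, run
 * the request, then verify both the completion status and the data
 * round-trip between source and destination buffers.
 */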
static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	uint64_t offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	struct raid_io_info io_info;

	SPDK_CU_ASSERT_FATAL(stripe_offset_blocks < r5f_info->stripe_blocks);

	init_io_info(&io_info, r5f_info, raid_ch, io_type, offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);

	deinit_io_info(&io_info);
}

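/*
 * Run a test body once for every entry in the parameter matrix, each time
 * with a freshly started raid5f bdev and a minimal raid_bdev_io_channel
 * wired to the module's real io channel.
 */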
static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			   struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel raid_ch = { 0 };

		r5f_info = create_raid5f(params);

		raid_ch.num_channels = params->num_base_bdevs;
		raid_ch.base_channel = calloc(params->num_base_bdevs, sizeof(struct spdk_io_channel *));
		SPDK_CU_ASSERT_FATAL(raid_ch.base_channel != NULL);

		raid_ch.module_channel = raid5f_get_io_channel(r5f_info->raid_bdev);
		SPDK_CU_ASSERT_FATAL(raid_ch.module_channel);

		test_fn(r5f_info->raid_bdev, &raid_ch);

		spdk_put_io_channel(raid_ch.module_channel);
		poll_threads();

		free(raid_ch.base_channel);

		delete_raid5f(r5f_info);
	}
}

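/*
 * Iterate over only the first few stripes: one stripe per base bdev should
 * be enough to exercise each parity chunk position, capped by the number of
 * stripes the configuration actually has.
 */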
#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

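/*
 * Read coverage for every data chunk of each tested stripe: a single block
 * at the start of the strip, the whole strip, the last block of the strip,
 * and an interior span that excludes the first and last block.
 */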
static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	uint64_t stripe_index;
	unsigned int i;

	for (i = 0; i < raid5f_stripe_data_chunks_num(raid_bdev); i++) {
		uint64_t stripe_offset = i * strip_size;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, 1);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset, strip_size);

			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + strip_size - 1, 1);
			if (strip_size <= 2) {
				continue;
			}
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, stripe_offset + 1, strip_size - 2);
		}
	}
}

static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

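/*
 * Feed four iovecs of 1, 1/2, 2 and num_base_bdevs strips into a stripe
 * request and check that they are split at strip boundaries. E.g. chunk 1
 * is expected to map two fragments: the whole 1/2-strip iovec plus the
 * first half-strip of the 2-strip iovec.
 */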
static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid5f_io_channel *r5ch = spdk_io_channel_get_ctx(raid_ch->module_channel);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_io_info io_info;
	struct raid_bdev_io *raid_io;
	struct spdk_bdev_io *bdev_io;
	struct stripe_request *stripe_req;
	struct chunk *chunk;
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE, 0, 0);

	raid_io = get_raid_io(&io_info);
	bdev_io = spdk_bdev_io_from_ctx(raid_io);
	bdev_io->u.bdev.iovs = iovs;
	bdev_io->u.bdev.iovcnt = iovcnt;

	stripe_req = raid5f_stripe_request_alloc(r5ch, STRIPE_REQ_WRITE);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	raid5f_stripe_request_free(stripe_req);
	spdk_bdev_free_io(bdev_io);
	deinit_io_info(&io_info);
}

static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}

static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index * r5f_info->stripe_blocks, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->desc->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

struct chunk_write_error_with_enomem_ctx {
	enum test_bdev_error_type error_type;
	struct spdk_bdev *bdev;
};

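/*
 * Hook invoked once the injected NOMEM condition is cleared: re-arm error
 * injection on a different base bdev so the retried stripe write then hits
 * a submit or completion failure.
 */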
static void
chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
{
	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;

	io_info->error.type = ctx->error_type;
	io_info->error.bdev = ctx->bdev;
}

static void
__test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;
	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			struct raid_base_bdev_info *base_bdev_info_last =
					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];

			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				if (base_bdev_info == base_bdev_info_last) {
					continue;
				}

				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index * r5f_info->stripe_blocks, r5f_info->stripe_blocks);

				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
				io_info.error.bdev = base_bdev_info->desc->bdev;
				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
				on_enomem_cb_ctx.error_type = error_type;
				on_enomem_cb_ctx.bdev = base_bdev_info_last->desc->bdev;

				test_raid5f_write_request(&io_info);

				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error_with_enomem(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("raid5f", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_raid5f_start);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}