/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/env.h"

#include "common/lib/ut_multithread.c"

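/*
 * The module under test is included directly so that its static functions and
 * internal types (struct stripe_request, struct chunk, ...) are visible to the
 * tests. The stubs that follow provide the raid framework symbols it references.
 */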
#include "bdev/raid/raid5f.c"

DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB(spdk_bdev_get_buf_align, size_t, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));

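/* The test's own minimal definition of the descriptor, which is opaque outside the bdev layer. */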
struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};

void
raid_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);

	if (bdev_io->internal.cb) {
		bdev_io->internal.cb(bdev_io, status == SPDK_BDEV_IO_STATUS_SUCCESS,
				     bdev_io->internal.caller_ctx);
	}
}

bool
raid_bdev_io_complete_part(struct raid_bdev_io *raid_io, uint64_t completed,
			   enum spdk_bdev_io_status status)
{
	assert(raid_io->base_bdev_io_remaining >= completed);
	raid_io->base_bdev_io_remaining -= completed;

	if (status != SPDK_BDEV_IO_STATUS_SUCCESS) {
		raid_io->base_bdev_io_status = status;
	}

	if (raid_io->base_bdev_io_remaining == 0) {
		raid_bdev_io_complete(raid_io, raid_io->base_bdev_io_status);
		return true;
	} else {
		return false;
	}
}

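/* One tested array configuration; test_setup() generates all combinations. */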
struct raid5f_params {
	uint8_t num_base_bdevs;
	uint64_t base_bdev_blockcnt;
	uint32_t base_bdev_blocklen;
	uint32_t strip_size;
};

static struct raid5f_params *g_params;
static size_t g_params_count;

#define ARRAY_FOR_EACH(a, e) \
	for (e = a; e < a + SPDK_COUNTOF(a); e++)

#define RAID5F_PARAMS_FOR_EACH(p) \
	for (p = g_params; p < g_params + g_params_count; p++)

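/*
 * Build the cartesian product of all parameter values. The strip size is
 * converted from KiB to blocks (e.g. 128 KiB / 512 B = 256 blocks); combinations
 * where it rounds to zero or exceeds the base bdev's block count are dropped.
 */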
static int
test_setup(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	struct raid5f_params *params;

	g_params_count = SPDK_COUNTOF(num_base_bdevs_values) *
			 SPDK_COUNTOF(base_bdev_blockcnt_values) *
			 SPDK_COUNTOF(base_bdev_blocklen_values) *
			 SPDK_COUNTOF(strip_size_kb_values);
	g_params = calloc(g_params_count, sizeof(*g_params));
	if (!g_params) {
		return -ENOMEM;
	}

	params = g_params;

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					params->num_base_bdevs = *num_base_bdevs;
					params->base_bdev_blockcnt = *base_bdev_blockcnt;
					params->base_bdev_blocklen = *base_bdev_blocklen;
					params->strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
					if (params->strip_size == 0 ||
					    params->strip_size > *base_bdev_blockcnt) {
						g_params_count--;
						continue;
					}
					params++;
				}
			}
		}
	}

	return 0;
}

static int
test_cleanup(void)
{
	free(g_params);
	return 0;
}

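/*
 * Construct a raid_bdev with fake base bdevs directly, bypassing the raid
 * framework's configuration path and filling in only the fields raid5f uses.
 */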
static struct raid_bdev *
create_raid_bdev(struct raid5f_params *params)
{
	struct raid_bdev *raid_bdev;
	struct raid_base_bdev_info *base_info;

	raid_bdev = calloc(1, sizeof(*raid_bdev));
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);

	raid_bdev->module = &g_raid5f_module;
	raid_bdev->num_base_bdevs = params->num_base_bdevs;
	raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs,
					   sizeof(struct raid_base_bdev_info));
	SPDK_CU_ASSERT_FATAL(raid_bdev->base_bdev_info != NULL);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		struct spdk_bdev *bdev;
		struct spdk_bdev_desc *desc;

		bdev = calloc(1, sizeof(*bdev));
		SPDK_CU_ASSERT_FATAL(bdev != NULL);
		bdev->blockcnt = params->base_bdev_blockcnt;
		bdev->blocklen = params->base_bdev_blocklen;

		desc = calloc(1, sizeof(*desc));
		SPDK_CU_ASSERT_FATAL(desc != NULL);
		desc->bdev = bdev;

		base_info->bdev = bdev;
		base_info->desc = desc;
	}

	raid_bdev->strip_size = params->strip_size;
	raid_bdev->strip_size_kb = params->strip_size * params->base_bdev_blocklen / 1024;
	raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
	raid_bdev->blocklen_shift = spdk_u32log2(params->base_bdev_blocklen);
	raid_bdev->bdev.blocklen = params->base_bdev_blocklen;

	return raid_bdev;
}

static void
delete_raid_bdev(struct raid_bdev *raid_bdev)
{
	struct raid_base_bdev_info *base_info;

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		free(base_info->bdev);
		free(base_info->desc);
	}
	free(raid_bdev->base_bdev_info);
	free(raid_bdev);
}

static struct raid5f_info *
create_raid5f(struct raid5f_params *params)
{
	struct raid_bdev *raid_bdev = create_raid_bdev(params);

	SPDK_CU_ASSERT_FATAL(raid5f_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid5f(struct raid5f_info *r5f_info)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;

	raid5f_stop(raid_bdev);

	delete_raid_bdev(raid_bdev);
}

static void
test_raid5f_start(void)
{
	struct raid5f_params *params;

	RAID5F_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;

		r5f_info = create_raid5f(params);

		CU_ASSERT_EQUAL(r5f_info->stripe_blocks, params->strip_size * (params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->total_stripes, params->base_bdev_blockcnt / params->strip_size);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.blockcnt,
				(params->base_bdev_blockcnt - params->base_bdev_blockcnt % params->strip_size) *
				(params->num_base_bdevs - 1));
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.optimal_io_boundary, params->strip_size);
		CU_ASSERT_TRUE(r5f_info->raid_bdev->bdev.split_on_optimal_io_boundary);
		CU_ASSERT_EQUAL(r5f_info->raid_bdev->bdev.write_unit_size, r5f_info->stripe_blocks);

		delete_raid5f(r5f_info);
	}
}

enum test_bdev_error_type {
	TEST_BDEV_ERROR_NONE,
	TEST_BDEV_ERROR_SUBMIT,
	TEST_BDEV_ERROR_COMPLETE,
	TEST_BDEV_ERROR_NOMEM,
};

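/*
 * Tracks a single test I/O across all of the base bdev I/O it fans out to,
 * including optional error injection against one base bdev and buffers for
 * verifying the written data and parity.
 */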
struct raid_io_info {
	struct raid5f_info *r5f_info;
	struct raid_bdev_io_channel *raid_ch;
	enum spdk_bdev_io_type io_type;
	uint64_t offset_blocks;
	uint64_t num_blocks;
	void *src_buf;
	void *dest_buf;
	size_t buf_size;
	void *parity_buf;
	void *reference_parity;
	size_t parity_buf_size;
	enum spdk_bdev_io_status status;
	bool failed;
	int remaining;
	TAILQ_HEAD(, spdk_bdev_io) bdev_io_queue;
	TAILQ_HEAD(, spdk_bdev_io_wait_entry) bdev_io_wait_queue;
	struct {
		enum test_bdev_error_type type;
		struct spdk_bdev *bdev;
		void (*on_enomem_cb)(struct raid_io_info *io_info, void *ctx);
		void *on_enomem_cb_ctx;
	} error;
};

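/*
 * bdev_io_buf mirrors the bdev layer's allocation scheme: a struct spdk_bdev_io
 * immediately followed by the module's I/O context (struct raid_bdev_io lives
 * in bdev_io->driver_ctx).
 */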
struct test_raid_bdev_io {
	char bdev_io_buf[sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io)];
	struct raid_io_info *io_info;
	void *buf;
};

void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	struct raid_io_info *io_info;

	io_info = ((struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(raid_io))->io_info;

	raid_io->waitq_entry.bdev = bdev;
	raid_io->waitq_entry.cb_fn = cb_fn;
	raid_io->waitq_entry.cb_arg = raid_io;
	TAILQ_INSERT_TAIL(&io_info->bdev_io_wait_queue, &raid_io->waitq_entry, link);
}

static void
raid_bdev_io_completion_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_io_info *io_info = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		io_info->failed = true;
	}

	if (--io_info->remaining == 0) {
		if (io_info->failed) {
			io_info->status = SPDK_BDEV_IO_STATUS_FAILED;
		} else {
			io_info->status = SPDK_BDEV_IO_STATUS_SUCCESS;
		}
	}
}

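/*
 * Allocate a fake bdev_io/raid_bdev_io pair for a (possibly split) request and
 * point it at the right offsets within the test's buffers. test_raid_bdev_io->buf
 * stands in for the data held by the base bdevs, while the bdev_io's iovec is
 * the application buffer, so the two roles swap between reads and writes.
 */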
static struct raid_bdev_io *
get_raid_io(struct raid_io_info *io_info, uint64_t offset_blocks_split, uint64_t num_blocks)
{
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev_io *raid_io;
	struct raid_bdev *raid_bdev = io_info->r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	struct test_raid_bdev_io *test_raid_bdev_io;
	void *src_buf = io_info->src_buf + offset_blocks_split * blocklen;
	void *dest_buf = io_info->dest_buf + offset_blocks_split * blocklen;

	test_raid_bdev_io = calloc(1, sizeof(*test_raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io != NULL);

	SPDK_CU_ASSERT_FATAL(test_raid_bdev_io->bdev_io_buf == (char *)test_raid_bdev_io);
	bdev_io = (struct spdk_bdev_io *)test_raid_bdev_io->bdev_io_buf;
	bdev_io->bdev = &raid_bdev->bdev;
	bdev_io->type = io_info->io_type;
	bdev_io->u.bdev.offset_blocks = io_info->offset_blocks + offset_blocks_split;
	bdev_io->u.bdev.num_blocks = num_blocks;
	bdev_io->internal.cb = raid_bdev_io_completion_cb;
	bdev_io->internal.caller_ctx = io_info;

	raid_io = (void *)bdev_io->driver_ctx;
	raid_io->raid_bdev = raid_bdev;
	raid_io->raid_ch = io_info->raid_ch;
	raid_io->base_bdev_io_status = SPDK_BDEV_IO_STATUS_SUCCESS;

	test_raid_bdev_io->io_info = io_info;

	if (io_info->io_type == SPDK_BDEV_IO_TYPE_READ) {
		test_raid_bdev_io->buf = src_buf;
		bdev_io->iov.iov_base = dest_buf;
	} else {
		test_raid_bdev_io->buf = dest_buf;
		bdev_io->iov.iov_base = src_buf;
	}

	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->iov.iov_len = num_blocks * blocklen;

	io_info->remaining++;

	return raid_io;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	free(bdev_io);
}

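/*
 * Base bdev submission stub: fails with -EINVAL or -ENOMEM when the target is
 * the bdev selected for error injection, otherwise queues a fake bdev_io whose
 * completion is delivered later by process_io_completions().
 */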
static int
submit_io(struct raid_io_info *io_info, struct spdk_bdev_desc *desc,
	  spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct spdk_bdev *bdev = desc->bdev;
	struct spdk_bdev_io *bdev_io;

	if (bdev == io_info->error.bdev) {
		if (io_info->error.type == TEST_BDEV_ERROR_SUBMIT) {
			return -EINVAL;
		} else if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
			return -ENOMEM;
		}
	}

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = bdev;
	bdev_io->internal.cb = cb;
	bdev_io->internal.caller_ctx = cb_arg;

	TAILQ_INSERT_TAIL(&io_info->bdev_io_queue, bdev_io, internal.link);

	return 0;
}

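/*
 * Drain the queued base bdev completions, failing those aimed at the
 * error-injected bdev. In the ENOMEM case the I/O wait queue is then flushed,
 * resubmitting the deferred chunk writes the way the real bdev layer would,
 * and the resulting completions are processed recursively.
 */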
static void
process_io_completions(struct raid_io_info *io_info)
{
	struct spdk_bdev_io *bdev_io;
	bool success;

	while ((bdev_io = TAILQ_FIRST(&io_info->bdev_io_queue))) {
		TAILQ_REMOVE(&io_info->bdev_io_queue, bdev_io, internal.link);

		if (io_info->error.type == TEST_BDEV_ERROR_COMPLETE &&
		    io_info->error.bdev == bdev_io->bdev) {
			success = false;
		} else {
			success = true;
		}

		bdev_io->internal.cb(bdev_io, success, bdev_io->internal.caller_ctx);
	}

	if (io_info->error.type == TEST_BDEV_ERROR_NOMEM) {
		struct spdk_bdev_io_wait_entry *waitq_entry, *tmp;
		struct spdk_bdev *enomem_bdev = io_info->error.bdev;

		io_info->error.type = TEST_BDEV_ERROR_NONE;

		if (io_info->error.on_enomem_cb != NULL) {
			io_info->error.on_enomem_cb(io_info, io_info->error.on_enomem_cb_ctx);
		}

		TAILQ_FOREACH_SAFE(waitq_entry, &io_info->bdev_io_wait_queue, link, tmp) {
			TAILQ_REMOVE(&io_info->bdev_io_wait_queue, waitq_entry, link);
			CU_ASSERT(waitq_entry->bdev == enomem_bdev);
			waitq_entry->cb_fn(waitq_entry->cb_arg);
		}

		process_io_completions(io_info);
	} else {
		CU_ASSERT(TAILQ_EMPTY(&io_info->bdev_io_wait_queue));
	}
}

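/*
 * Write stub for the base bdevs. Each call carries one chunk of a stripe
 * request; the written data (or parity) is copied into the test buffers so it
 * can later be checked against the expected contents, then the I/O is queued
 * for completion. stripe_idx_off is the stripe's index relative to the start
 * of the test I/O.
 */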
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct chunk *chunk = cb_arg;
	struct stripe_request *stripe_req;
	struct test_raid_bdev_io *test_raid_bdev_io;
	struct raid_io_info *io_info;
	struct raid_bdev *raid_bdev;
	uint64_t stripe_idx_off;
	uint8_t data_chunk_idx;
	void *dest_buf;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_write_complete_bdev_io);
	SPDK_CU_ASSERT_FATAL(iovcnt == 1);

	stripe_req = raid5f_chunk_stripe_req(chunk);
	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(stripe_req->raid_io);
	io_info = test_raid_bdev_io->io_info;

	raid_bdev = io_info->r5f_info->raid_bdev;

	stripe_idx_off = offset_blocks / raid_bdev->strip_size -
			 io_info->offset_blocks / io_info->r5f_info->stripe_blocks;

	if (chunk == stripe_req->parity_chunk) {
		if (io_info->parity_buf == NULL) {
			goto submit;
		}
		dest_buf = io_info->parity_buf + stripe_idx_off * raid_bdev->strip_size_kb * 1024;
	} else {
		data_chunk_idx = chunk < stripe_req->parity_chunk ? chunk->index : chunk->index - 1;
		dest_buf = test_raid_bdev_io->buf +
			   (stripe_idx_off * io_info->r5f_info->stripe_blocks +
			    data_chunk_idx * raid_bdev->strip_size) *
			   raid_bdev->bdev.blocklen;
	}

	memcpy(dest_buf, iov->iov_base, iov->iov_len);
submit:
	return submit_io(io_info, desc, cb, cb_arg);
}

int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;
	struct test_raid_bdev_io *test_raid_bdev_io;

	SPDK_CU_ASSERT_FATAL(cb == raid5f_chunk_read_complete);
	SPDK_CU_ASSERT_FATAL(iovcnt == 1);

	test_raid_bdev_io = (struct test_raid_bdev_io *)spdk_bdev_io_from_ctx(raid_io);

	memcpy(iov->iov_base, test_raid_bdev_io->buf, iov->iov_len);

	return submit_io(test_raid_bdev_io->io_info, desc, cb, cb_arg);
}

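/*
 * RAID5 parity is the XOR of the data chunks: with three data chunks,
 * P = D0 ^ D1 ^ D2, so xor-ing each data chunk into an initially zeroed
 * accumulator yields the expected parity.
 */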
static void
xor_block(uint8_t *a, uint8_t *b, size_t size)
{
	while (size-- > 0) {
		a[size] ^= b[size];
	}
}

static void
test_raid5f_write_request(struct raid_io_info *io_info)
{
	struct raid_bdev_io *raid_io;

	SPDK_CU_ASSERT_FATAL(io_info->num_blocks / io_info->r5f_info->stripe_blocks == 1);

	raid_io = get_raid_io(io_info, 0, io_info->num_blocks);

	raid5f_submit_rw_request(raid_io);

	process_io_completions(io_info);

	if (io_info->status == SPDK_BDEV_IO_STATUS_SUCCESS && io_info->parity_buf) {
		CU_ASSERT(memcmp(io_info->parity_buf, io_info->reference_parity,
				 io_info->parity_buf_size) == 0);
	}
}

static void
test_raid5f_read_request(struct raid_io_info *io_info)
{
	uint32_t strip_size = io_info->r5f_info->raid_bdev->strip_size;
	uint64_t num_blocks = io_info->num_blocks;
	uint64_t offset_blocks_split = 0;

	while (num_blocks) {
		uint64_t chunk_offset = offset_blocks_split % strip_size;
		uint64_t num_blocks_split = spdk_min(num_blocks, strip_size - chunk_offset);
		struct raid_bdev_io *raid_io;

		raid_io = get_raid_io(io_info, offset_blocks_split, num_blocks_split);

		raid5f_submit_rw_request(raid_io);

		num_blocks -= num_blocks_split;
		offset_blocks_split += num_blocks_split;
	}

	process_io_completions(io_info);
}

static void
deinit_io_info(struct raid_io_info *io_info)
{
	/* src/dest were allocated with spdk_dma_malloc(), so pair them with spdk_dma_free() */
	spdk_dma_free(io_info->src_buf);
	spdk_dma_free(io_info->dest_buf);
	free(io_info->parity_buf);
	free(io_info->reference_parity);
}

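/*
 * Set up source/destination buffers for a test I/O. Each block of the source
 * is stamped with its block index (over a 0xff fill) so that misplaced or
 * missing blocks are caught by the final memcmp().
 */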
static void
init_io_info(struct raid_io_info *io_info, struct raid5f_info *r5f_info,
	     struct raid_bdev_io_channel *raid_ch, enum spdk_bdev_io_type io_type,
	     uint64_t offset_blocks, uint64_t num_blocks)
{
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	void *src_buf, *dest_buf;
	size_t buf_size = num_blocks * blocklen;
	uint64_t block;

	memset(io_info, 0, sizeof(*io_info));

	src_buf = spdk_dma_malloc(buf_size, 4096, NULL);
	SPDK_CU_ASSERT_FATAL(src_buf != NULL);

	dest_buf = spdk_dma_malloc(buf_size, 4096, NULL);
	SPDK_CU_ASSERT_FATAL(dest_buf != NULL);

	memset(src_buf, 0xff, buf_size);
	for (block = 0; block < num_blocks; block++) {
		*((uint64_t *)(src_buf + block * blocklen)) = block;
	}

	io_info->r5f_info = r5f_info;
	io_info->raid_ch = raid_ch;
	io_info->io_type = io_type;
	io_info->offset_blocks = offset_blocks;
	io_info->num_blocks = num_blocks;
	io_info->src_buf = src_buf;
	io_info->dest_buf = dest_buf;
	io_info->buf_size = buf_size;
	io_info->status = SPDK_BDEV_IO_STATUS_PENDING;

	TAILQ_INIT(&io_info->bdev_io_queue);
	TAILQ_INIT(&io_info->bdev_io_wait_queue);
}

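/*
 * Compute the reference parity for a full-stripe write by XOR-ing the source
 * data chunks, and allocate parity_buf for the parity the module actually
 * writes; the two are compared after the request completes.
 */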
static void
io_info_setup_parity(struct raid_io_info *io_info)
{
	struct raid5f_info *r5f_info = io_info->r5f_info;
	struct raid_bdev *raid_bdev = r5f_info->raid_bdev;
	uint32_t blocklen = raid_bdev->bdev.blocklen;
	uint64_t num_stripes = io_info->num_blocks / r5f_info->stripe_blocks;
	size_t strip_len = raid_bdev->strip_size * blocklen;
	void *src = io_info->src_buf;
	void *dest;
	unsigned i, j;

	io_info->parity_buf_size = num_stripes * strip_len;
	io_info->parity_buf = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->parity_buf != NULL);

	io_info->reference_parity = calloc(1, io_info->parity_buf_size);
	SPDK_CU_ASSERT_FATAL(io_info->reference_parity != NULL);

	dest = io_info->reference_parity;
	for (i = 0; i < num_stripes; i++) {
		for (j = 0; j < raid5f_stripe_data_chunks_num(raid_bdev); j++) {
			xor_block(dest, src, strip_len);
			src += strip_len;
		}
		dest += strip_len;
	}
}

static void
test_raid5f_submit_rw_request(struct raid5f_info *r5f_info, struct raid_bdev_io_channel *raid_ch,
			      enum spdk_bdev_io_type io_type, uint64_t stripe_index, uint64_t stripe_offset_blocks,
			      uint64_t num_blocks)
{
	uint64_t offset_blocks = stripe_index * r5f_info->stripe_blocks + stripe_offset_blocks;
	struct raid_io_info io_info;

	init_io_info(&io_info, r5f_info, raid_ch, io_type, offset_blocks, num_blocks);

	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_READ:
		test_raid5f_read_request(&io_info);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		io_info_setup_parity(&io_info);
		test_raid5f_write_request(&io_info);
		break;
	default:
		CU_FAIL_FATAL("unsupported io_type");
	}

	CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(memcmp(io_info.src_buf, io_info.dest_buf, io_info.buf_size) == 0);

	deinit_io_info(&io_info);
}

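/* Run a test callback once for every generated raid5f configuration. */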
static void
run_for_each_raid5f_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			   struct raid_bdev_io_channel *raid_ch))
{
	struct raid5f_params *params;

	RAID5F_PARAMS_FOR_EACH(params) {
		struct raid5f_info *r5f_info;
		struct raid_bdev_io_channel raid_ch = { 0 };

		r5f_info = create_raid5f(params);

		raid_ch.num_channels = params->num_base_bdevs;
		raid_ch.base_channel = calloc(params->num_base_bdevs, sizeof(struct spdk_io_channel *));
		SPDK_CU_ASSERT_FATAL(raid_ch.base_channel != NULL);

		raid_ch.module_channel = raid5f_get_io_channel(r5f_info->raid_bdev);
		SPDK_CU_ASSERT_FATAL(raid_ch.module_channel);

		test_fn(r5f_info->raid_bdev, &raid_ch);

		spdk_put_io_channel(raid_ch.module_channel);
		poll_threads();

		free(raid_ch.base_channel);

		delete_raid5f(r5f_info);
	}
}

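/* Caps the stripes tested per configuration, presumably to keep the runtime bounded. */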
#define RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, i) \
	for (i = 0; i < spdk_min(raid_bdev->num_base_bdevs, ((struct raid5f_info *)raid_bdev->module_private)->total_stripes); i++)

struct test_request_conf {
	uint64_t stripe_offset_blocks;
	uint64_t num_blocks;
};

static void
__test_raid5f_submit_read_request(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint32_t strip_size = raid_bdev->strip_size;
	unsigned int i;

	struct test_request_conf test_requests[] = {
		{ 0, 1 },
		{ 0, strip_size },
		{ 0, strip_size + 1 },
		{ 0, r5f_info->stripe_blocks },
		{ 1, 1 },
		{ 1, strip_size },
		{ 1, strip_size + 1 },
		{ strip_size, 1 },
		{ strip_size, strip_size },
		{ strip_size, strip_size + 1 },
		{ strip_size - 1, 1 },
		{ strip_size - 1, strip_size },
		{ strip_size - 1, strip_size + 1 },
		{ strip_size - 1, 2 },
	};

	for (i = 0; i < SPDK_COUNTOF(test_requests); i++) {
		struct test_request_conf *t = &test_requests[i];
		uint64_t stripe_index;

		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_READ,
						      stripe_index, t->stripe_offset_blocks, t->num_blocks);
		}
	}
}

static void
test_raid5f_submit_read_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_read_request);
}

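/*
 * Verify how an arbitrary iovec array is mapped onto the stripe's data chunks.
 * Each data chunk receives exactly strip_bytes, split across iovec boundaries:
 * chunk 0 is all of iovs[0]; chunk 1 is iovs[1] plus the first quarter of
 * iovs[2]; later chunks (with enough base bdevs) continue through iovs[2] and
 * into iovs[3], so each ends up with one or two iovec slices.
 */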
static void
__test_raid5f_stripe_request_map_iovecs(struct raid_bdev *raid_bdev,
					struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_io_channel *r5ch = spdk_io_channel_get_ctx(raid_ch->module_channel);
	size_t strip_bytes = raid_bdev->strip_size * raid_bdev->bdev.blocklen;
	struct raid_bdev_io raid_io = { .raid_bdev = raid_bdev };
	struct stripe_request *stripe_req;
	struct chunk *chunk;
	struct iovec iovs[] = {
		{ .iov_base = (void *)0x0ff0000, .iov_len = strip_bytes },
		{ .iov_base = (void *)0x1ff0000, .iov_len = strip_bytes / 2 },
		{ .iov_base = (void *)0x2ff0000, .iov_len = strip_bytes * 2 },
		{ .iov_base = (void *)0x3ff0000, .iov_len = strip_bytes * raid_bdev->num_base_bdevs },
	};
	size_t iovcnt = SPDK_COUNTOF(iovs);
	int ret;

	stripe_req = raid5f_stripe_request_alloc(r5ch);
	SPDK_CU_ASSERT_FATAL(stripe_req != NULL);

	stripe_req->parity_chunk = &stripe_req->chunks[raid5f_stripe_data_chunks_num(raid_bdev)];
	stripe_req->raid_io = &raid_io;

	ret = raid5f_stripe_request_map_iovecs(stripe_req, iovs, iovcnt);
	CU_ASSERT(ret == 0);

	chunk = &stripe_req->chunks[0];
	CU_ASSERT_EQUAL(chunk->iovcnt, 1);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[0].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[0].iov_len);

	chunk = &stripe_req->chunks[1];
	CU_ASSERT_EQUAL(chunk->iovcnt, 2);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[1].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[1].iov_len);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[2].iov_base);
	CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, iovs[2].iov_len / 4);

	if (raid_bdev->num_base_bdevs > 3) {
		chunk = &stripe_req->chunks[2];
		CU_ASSERT_EQUAL(chunk->iovcnt, 1);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + strip_bytes / 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 2);
	}
	if (raid_bdev->num_base_bdevs > 4) {
		chunk = &stripe_req->chunks[3];
		CU_ASSERT_EQUAL(chunk->iovcnt, 2);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_base, iovs[2].iov_base + (strip_bytes / 2) * 3);
		CU_ASSERT_EQUAL(chunk->iovs[0].iov_len, iovs[2].iov_len / 4);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_base, iovs[3].iov_base);
		CU_ASSERT_EQUAL(chunk->iovs[1].iov_len, strip_bytes / 2);
	}

	raid5f_stripe_request_free(stripe_req);
}

static void
test_raid5f_stripe_request_map_iovecs(void)
{
	run_for_each_raid5f_config(__test_raid5f_stripe_request_map_iovecs);
}

static void
__test_raid5f_submit_full_stripe_write_request(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	uint64_t stripe_index;

	RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
		test_raid5f_submit_rw_request(r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					      stripe_index, 0, r5f_info->stripe_blocks);
	}
}

static void
test_raid5f_submit_full_stripe_write_request(void)
{
	run_for_each_raid5f_config(__test_raid5f_submit_full_stripe_write_request);
}

static void
__test_raid5f_chunk_write_error(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_NOMEM; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index * r5f_info->stripe_blocks, r5f_info->stripe_blocks);

				io_info.error.type = error_type;
				io_info.error.bdev = base_bdev_info->bdev;

				test_raid5f_write_request(&io_info);

				if (error_type == TEST_BDEV_ERROR_NOMEM) {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_SUCCESS);
				} else {
					CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);
				}

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error);
}

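/*
 * Exercise the case where a chunk write first hits ENOMEM and a different
 * error is then injected on another base bdev while the request is being
 * resubmitted from the I/O wait queue.
 */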
struct chunk_write_error_with_enomem_ctx {
	enum test_bdev_error_type error_type;
	struct spdk_bdev *bdev;
};

static void
chunk_write_error_with_enomem_cb(struct raid_io_info *io_info, void *_ctx)
{
	struct chunk_write_error_with_enomem_ctx *ctx = _ctx;

	io_info->error.type = ctx->error_type;
	io_info->error.bdev = ctx->bdev;
}

static void
__test_raid5f_chunk_write_error_with_enomem(struct raid_bdev *raid_bdev,
		struct raid_bdev_io_channel *raid_ch)
{
	struct raid5f_info *r5f_info = raid_bdev->module_private;
	struct raid_base_bdev_info *base_bdev_info;
	uint64_t stripe_index;
	struct raid_io_info io_info;
	enum test_bdev_error_type error_type;
	struct chunk_write_error_with_enomem_ctx on_enomem_cb_ctx;

	for (error_type = TEST_BDEV_ERROR_SUBMIT; error_type <= TEST_BDEV_ERROR_COMPLETE; error_type++) {
		RAID5F_TEST_FOR_EACH_STRIPE(raid_bdev, stripe_index) {
			struct raid_base_bdev_info *base_bdev_info_last =
					&raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs - 1];

			RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_bdev_info) {
				if (base_bdev_info == base_bdev_info_last) {
					continue;
				}

				init_io_info(&io_info, r5f_info, raid_ch, SPDK_BDEV_IO_TYPE_WRITE,
					     stripe_index * r5f_info->stripe_blocks, r5f_info->stripe_blocks);

				io_info.error.type = TEST_BDEV_ERROR_NOMEM;
				io_info.error.bdev = base_bdev_info->bdev;
				io_info.error.on_enomem_cb = chunk_write_error_with_enomem_cb;
				io_info.error.on_enomem_cb_ctx = &on_enomem_cb_ctx;
				on_enomem_cb_ctx.error_type = error_type;
				on_enomem_cb_ctx.bdev = base_bdev_info_last->bdev;

				test_raid5f_write_request(&io_info);

				CU_ASSERT(io_info.status == SPDK_BDEV_IO_STATUS_FAILED);

				deinit_io_info(&io_info);
			}
		}
	}
}

static void
test_raid5f_chunk_write_error_with_enomem(void)
{
	run_for_each_raid5f_config(__test_raid5f_chunk_write_error_with_enomem);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("raid5f", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_raid5f_start);
	CU_ADD_TEST(suite, test_raid5f_submit_read_request);
	CU_ADD_TEST(suite, test_raid5f_stripe_request_map_iovecs);
	CU_ADD_TEST(suite, test_raid5f_submit_full_stripe_write_request);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error);
	CU_ADD_TEST(suite, test_raid5f_chunk_write_error_with_enomem);

	allocate_threads(1);
	set_thread(0);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}