/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2024 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"

#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid0.c"
#include "../common.c"

#define MAX_BASE_DRIVES 32
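/*
 * Upper bound on the number of IO ranges generated by raid_bdev_io_generate():
 * up to 3 start offsets x 3 end offsets x 3 starting drives per strip count,
 * across (MAX_BASE_DRIVES - 1) + 5 strip-count values (see the generators below).
 */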
#define MAX_TEST_IO_RANGE (3 * 3 * 3 * (MAX_BASE_DRIVES + 5))
#define BLOCK_CNT (1024ul * 1024ul * 1024ul * 1024ul)

/* Data structure to capture the output of IO for verification */
struct io_output {
	struct spdk_bdev_desc       *desc;
	struct spdk_io_channel      *ch;
	uint64_t                    offset_blocks;
	uint64_t                    num_blocks;
	spdk_bdev_io_completion_cb  cb;
	void                        *cb_arg;
	enum spdk_bdev_io_type      iotype;
	struct iovec                *iovs;
	int                         iovcnt;
	void                        *md_buf;
};

struct raid_io_ranges {
	uint64_t lba;
	uint64_t nblocks;
};

/* Globals */
struct io_output *g_io_output = NULL;
uint32_t g_io_output_index;
uint32_t g_io_comp_status;
bool g_child_io_status_flag;
TAILQ_HEAD(bdev, spdk_bdev);
uint32_t g_block_len;
uint32_t g_strip_size;
uint32_t g_max_io_size;
uint8_t g_max_base_drives;
struct raid_io_ranges g_io_ranges[MAX_TEST_IO_RANGE];
uint32_t g_io_range_idx;
bool g_enable_dif;

DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(raid_bdev_queue_io_wait, (struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
					struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn));
DEFINE_STUB(spdk_bdev_flush_blocks, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_notify_blockcnt_change, int, (struct spdk_bdev *bdev, uint64_t size), 0);

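/* Lightweight implementations of the bdev metadata accessors used by raid0.c;
 * they read the corresponding fields directly from struct spdk_bdev.
 */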
bool
spdk_bdev_is_md_interleaved(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && bdev->md_interleave;
}

bool
spdk_bdev_is_md_separate(const struct spdk_bdev *bdev)
{
	return (bdev->md_len != 0) && !bdev->md_interleave;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

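/* Default test options: 32 base drives, 4096-byte blocks, 64-block strips,
 * IOs of up to 1024 blocks, DIF disabled.
 */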
static int
set_test_opts(void)
{
	g_max_base_drives = MAX_BASE_DRIVES;
	g_block_len = 4096;
	g_strip_size = 64;
	g_max_io_size = 1024;
	g_enable_dif = false;

	return 0;
}

static int
set_test_opts_dif(void)
{
	set_test_opts();
	g_enable_dif = true;

	return 0;
}

/* Set globals before every test run */
static void
set_globals(void)
{
	uint32_t max_splits;

	if (g_max_io_size < g_strip_size) {
		max_splits = 2;
	} else {
		max_splits = (g_max_io_size / g_strip_size) + 1;
	}
	if (max_splits < g_max_base_drives) {
		max_splits = g_max_base_drives;
	}

	g_io_output = calloc(max_splits, sizeof(struct io_output));
	SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
	g_io_output_index = 0;
	g_io_comp_status = 0;
	g_child_io_status_flag = true;
}

/* Reset globals */
static void
reset_globals(void)
{
	if (g_io_output) {
		free(g_io_output);
		g_io_output = NULL;
	}
}

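/* Generate DIF protection information over the data buffers using the bdev's
 * DIF configuration. Only the separate-metadata (DIX) layout performs
 * generation, which matches how these tests configure the bdevs; no-op when
 * DIF is disabled.
 */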
static void
generate_dif(struct iovec *iovs, int iovcnt, void *md_buf,
	     uint64_t offset_blocks, uint32_t num_blocks, struct spdk_bdev *bdev)
{
	struct spdk_dif_ctx dif_ctx;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	spdk_dif_type_t dif_type;
	bool md_interleaved;
	struct iovec md_iov;

	dif_type = spdk_bdev_get_dif_type(bdev);
	md_interleaved = spdk_bdev_is_md_interleaved(bdev);

	if (dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       md_interleaved,
			       spdk_bdev_is_dif_head_of_md(bdev),
			       dif_type,
			       bdev->dif_check_flags,
			       offset_blocks,
			       0xFFFF, 0x123, 0, 0, &dif_opts);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	if (!md_interleaved) {
		md_iov.iov_base = md_buf;
		md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks;

		rc = spdk_dix_generate(iovs, iovcnt, &md_iov, num_blocks, &dif_ctx);
		SPDK_CU_ASSERT_FATAL(rc == 0);
	}
}

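/* Verify the DIX protection information carried by the buffers; no-op when
 * DIF is disabled, and only the separate-metadata layout is checked.
 */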
static void
verify_dif(struct iovec *iovs, int iovcnt, void *md_buf,
	   uint64_t offset_blocks, uint32_t num_blocks, struct spdk_bdev *bdev)
{
	struct spdk_dif_ctx dif_ctx;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	struct spdk_dif_error errblk;
	spdk_dif_type_t dif_type;
	bool md_interleaved;
	struct iovec md_iov;

	dif_type = spdk_bdev_get_dif_type(bdev);
	md_interleaved = spdk_bdev_is_md_interleaved(bdev);

	if (dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       md_interleaved,
			       spdk_bdev_is_dif_head_of_md(bdev),
			       dif_type,
			       bdev->dif_check_flags,
			       offset_blocks,
			       0xFFFF, 0x123, 0, 0, &dif_opts);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	if (!md_interleaved) {
		md_iov.iov_base = md_buf;
		md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks;

		rc = spdk_dix_verify(iovs, iovcnt,
				     &md_iov, num_blocks, &dif_ctx, &errblk);
		SPDK_CU_ASSERT_FATAL(rc == 0);
	}
}

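/* Remap the DIX reference tags in the metadata buffer to a new starting
 * offset, as raid0 must do when translating a logical offset into a base
 * bdev offset; no-op when DIF is disabled.
 */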
static void
remap_dif(void *md_buf, uint64_t num_blocks, struct spdk_bdev *bdev, uint32_t remapped_offset)
{
	struct spdk_dif_ctx dif_ctx;
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	struct spdk_dif_error errblk;
	spdk_dif_type_t dif_type;
	bool md_interleaved;
	struct iovec md_iov;

	dif_type = spdk_bdev_get_dif_type(bdev);
	md_interleaved = spdk_bdev_is_md_interleaved(bdev);

	if (dif_type == SPDK_DIF_DISABLE) {
		return;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       spdk_bdev_get_block_size(bdev),
			       spdk_bdev_get_md_size(bdev),
			       md_interleaved,
			       spdk_bdev_is_dif_head_of_md(bdev),
			       dif_type,
			       bdev->dif_check_flags,
			       0,
			       0xFFFF, 0x123, 0, 0, &dif_opts);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	if (!md_interleaved) {
		md_iov.iov_base = md_buf;
		md_iov.iov_len = spdk_bdev_get_md_size(bdev) * num_blocks;

		spdk_dif_ctx_set_remapped_init_ref_tag(&dif_ctx, remapped_offset);

		rc = spdk_dix_remap_ref_tag(&md_iov, num_blocks, &dif_ctx, &errblk, false);
		SPDK_CU_ASSERT_FATAL(rc == 0);
	}
}

/* Store the IO completion status in a global variable so tests can verify it */
void
raid_test_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	g_io_comp_status = (status == SPDK_BDEV_IO_STATUS_SUCCESS);
}

int
raid_bdev_remap_dix_reftag(void *md_buf, uint64_t num_blocks,
			   struct spdk_bdev *bdev, uint32_t remapped_offset)
{
	remap_dif(md_buf, num_blocks, bdev, remapped_offset);

	return 0;
}

int
raid_bdev_verify_dix_reftag(struct iovec *iovs, int iovcnt, void *md_buf,
			    uint64_t num_blocks, struct spdk_bdev *bdev, uint32_t offset_blocks)
{
	verify_dif(iovs, iovcnt, md_buf, offset_blocks, num_blocks, bdev);

	return 0;
}

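/* Record the parameters of a submitted child IO for later verification */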
static void
set_io_output(struct io_output *output,
	      struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	      uint64_t offset_blocks, uint64_t num_blocks,
	      spdk_bdev_io_completion_cb cb, void *cb_arg,
	      enum spdk_bdev_io_type iotype, struct iovec *iovs,
	      int iovcnt, void *md)
{
	output->desc = desc;
	output->ch = ch;
	output->offset_blocks = offset_blocks;
	output->num_blocks = num_blocks;
	output->cb = cb;
	output->cb_arg = cb_arg;
	output->iotype = iotype;
	output->iovs = iovs;
	output->iovcnt = iovcnt;
	output->md_buf = md;
}

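/* Build a minimal child spdk_bdev_io from a captured io_output so completion
 * callbacks have a real bdev_io to inspect.
 */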
static struct spdk_bdev_io *
get_child_io(struct io_output *output)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(*bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);

	bdev_io->bdev = spdk_bdev_desc_get_bdev(output->desc);
	bdev_io->type = output->iotype;
	bdev_io->u.bdev.offset_blocks = output->offset_blocks;
	bdev_io->u.bdev.num_blocks = output->num_blocks;
	bdev_io->u.bdev.iovs = output->iovs;
	bdev_io->u.bdev.iovcnt = output->iovcnt;
	bdev_io->u.bdev.md_buf = output->md_buf;

	return bdev_io;
}

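/* Complete a child IO immediately with the globally configured status; for
 * successful reads, verify the DIF that the read stub generated.
 */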
static void
child_io_complete(struct spdk_bdev_io *bdev_io, spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_child_io_status_flag && bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		verify_dif(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.md_buf,
			   bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks, bdev_io->bdev);
	}

	cb(bdev_io, g_child_io_status_flag, cb_arg);
}

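/* Stub for the write submission path: capture the IO, fabricate a child
 * bdev_io and complete it synchronously. The unmap stub below behaves the
 * same way.
 */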
int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt,
			    uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg,
			    struct spdk_bdev_ext_io_opts *opts)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	if (g_max_io_size < g_strip_size) {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
	} else {
		SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
	}
	set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
		      SPDK_BDEV_IO_TYPE_WRITE, iov, iovcnt, opts->metadata);
	g_io_output_index++;

	child_io = get_child_io(output);
	child_io_complete(child_io, cb, cb_arg);

	return 0;
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
		      SPDK_BDEV_IO_TYPE_UNMAP, NULL, 0, NULL);
	g_io_output_index++;

	child_io = get_child_io(output);
	child_io_complete(child_io, cb, cb_arg);

	return 0;
}

void
spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io) {
		free(bdev_io);
	}
}

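/* Stub for the read submission path: besides capturing the IO, fill the
 * buffers with freshly generated DIF to simulate protection information read
 * from the base bdev.
 */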
int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt,
			   uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg,
			   struct spdk_bdev_ext_io_opts *opts)
{
	struct io_output *output = &g_io_output[g_io_output_index];
	struct spdk_bdev_io *child_io;

	SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
	set_io_output(output, desc, ch, offset_blocks, num_blocks, cb, cb_arg,
		      SPDK_BDEV_IO_TYPE_READ, iov, iovcnt, opts->metadata);
	generate_dif(iov, iovcnt, opts->metadata, offset_blocks, num_blocks,
		     spdk_bdev_desc_get_bdev(desc));
	g_io_output_index++;

	child_io = get_child_io(output);
	child_io_complete(child_io, cb, cb_arg);

	return 0;
}

static void
raid_io_cleanup(struct raid_bdev_io *raid_io)
{
	if (raid_io->iovs) {
		int i;

		for (i = 0; i < raid_io->iovcnt; i++) {
			free(raid_io->iovs[i].iov_base);
		}
		free(raid_io->iovs);
	}

	free(raid_io->md_buf);
	free(raid_io);
}

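/* Allocate payload buffers (and, for separate metadata, a DIF buffer) and
 * initialize a raid IO request; unmap and flush requests carry no payload.
 */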
static void
raid_io_initialize(struct raid_bdev_io *raid_io, struct raid_bdev_io_channel *raid_ch,
		   struct raid_bdev *raid_bdev, uint64_t lba, uint64_t blocks, int16_t iotype)
{
	struct iovec *iovs = NULL;
	int iovcnt = 0;
	void *md_buf = NULL;

	if (iotype != SPDK_BDEV_IO_TYPE_UNMAP && iotype != SPDK_BDEV_IO_TYPE_FLUSH) {
		iovcnt = 1;
		iovs = calloc(iovcnt, sizeof(struct iovec));
		SPDK_CU_ASSERT_FATAL(iovs != NULL);
		iovs->iov_len = blocks * g_block_len;
		iovs->iov_base = calloc(1, iovs->iov_len);
		SPDK_CU_ASSERT_FATAL(iovs->iov_base != NULL);

		if (spdk_bdev_is_md_separate(&raid_bdev->bdev)) {
			md_buf = calloc(1, blocks * spdk_bdev_get_md_size(&raid_bdev->bdev));
			SPDK_CU_ASSERT_FATAL(md_buf != NULL);
		}
	}

	raid_test_bdev_io_init(raid_io, raid_bdev, raid_ch, iotype, lba, blocks, iovs, iovcnt, md_buf);
}

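/* Verify that a read/write raid IO was split into the expected child IOs.
 * RAID0 mapping: logical strip = offset >> strip_shift, stripe row =
 * strip / num_drives, drive = strip % num_drives. As an illustrative example
 * (hypothetical numbers, not this test's configuration): with 64-block strips
 * and 4 drives, logical block 400 lies in strip 6, mapping to row 1 on
 * drive 2 at base LBA (1 << 6) + (400 & 63) = 80.
 */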
static void
verify_io(struct raid_bdev_io *raid_io, uint32_t io_status)
{
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	uint8_t num_base_drives = raid_bdev->num_base_bdevs;
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_strip = raid_io->offset_blocks >> strip_shift;
	uint64_t end_strip = (raid_io->offset_blocks + raid_io->num_blocks - 1) >>
			     strip_shift;
	uint32_t splits_reqd = (end_strip - start_strip + 1);
	uint32_t strip;
	uint64_t pd_strip;
	uint8_t pd_idx;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint32_t index = 0;
	struct io_output *output;

	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);

	CU_ASSERT(splits_reqd == g_io_output_index);
	for (strip = start_strip; strip <= end_strip; strip++, index++) {
		pd_strip = strip / num_base_drives;
		pd_idx = strip % num_base_drives;
		if (strip == start_strip) {
			offset_in_strip = raid_io->offset_blocks & (g_strip_size - 1);
			pd_lba = (pd_strip << strip_shift) + offset_in_strip;
			if (strip == end_strip) {
				pd_blocks = raid_io->num_blocks;
			} else {
				pd_blocks = g_strip_size - offset_in_strip;
			}
		} else if (strip == end_strip) {
			pd_lba = pd_strip << strip_shift;
			pd_blocks = ((raid_io->offset_blocks + raid_io->num_blocks - 1) &
				     (g_strip_size - 1)) + 1;
		} else {
			pd_lba = pd_strip << raid_bdev->strip_size_shift;
			pd_blocks = raid_bdev->strip_size;
		}
		output = &g_io_output[index];
		CU_ASSERT(pd_lba == output->offset_blocks);
		CU_ASSERT(pd_blocks == output->num_blocks);
		CU_ASSERT(raid_bdev_channel_get_base_channel(raid_io->raid_ch, pd_idx) == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == output->desc);
		CU_ASSERT(raid_io->type == output->iotype);
		if (raid_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
			verify_dif(output->iovs, output->iovcnt, output->md_buf,
				   output->offset_blocks, output->num_blocks,
				   spdk_bdev_desc_get_bdev(raid_bdev->base_bdev_info[pd_idx].desc));
		}
	}
	CU_ASSERT(g_io_comp_status == io_status);
}

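/* Verify the splitting of a null-payload (unmap/flush) raid IO, which raid0
 * submits as at most one child IO per base bdev rather than one per strip.
 */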
static void
verify_io_without_payload(struct raid_bdev_io *raid_io, uint32_t io_status)
{
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	uint8_t num_base_drives = raid_bdev->num_base_bdevs;
	uint32_t strip_shift = spdk_u32log2(g_strip_size);
	uint64_t start_offset_in_strip = raid_io->offset_blocks % g_strip_size;
	uint64_t end_offset_in_strip = (raid_io->offset_blocks + raid_io->num_blocks - 1) %
				       g_strip_size;
	uint64_t start_strip = raid_io->offset_blocks >> strip_shift;
	uint64_t end_strip = (raid_io->offset_blocks + raid_io->num_blocks - 1) >>
			     strip_shift;
	uint8_t n_disks_involved;
	uint64_t start_strip_disk_idx;
	uint64_t end_strip_disk_idx;
	uint64_t nblocks_in_start_disk;
	uint64_t offset_in_start_disk;
	uint8_t disk_idx;
	uint64_t base_io_idx;
	uint64_t sum_nblocks = 0;
	struct io_output *output;

	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
	SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
	SPDK_CU_ASSERT_FATAL(raid_io->type != SPDK_BDEV_IO_TYPE_READ);
	SPDK_CU_ASSERT_FATAL(raid_io->type != SPDK_BDEV_IO_TYPE_WRITE);

	n_disks_involved = spdk_min(end_strip - start_strip + 1, num_base_drives);
	CU_ASSERT(n_disks_involved == g_io_output_index);

	start_strip_disk_idx = start_strip % num_base_drives;
	end_strip_disk_idx = end_strip % num_base_drives;

	offset_in_start_disk = g_io_output[0].offset_blocks;
	nblocks_in_start_disk = g_io_output[0].num_blocks;

	for (base_io_idx = 0, disk_idx = start_strip_disk_idx; base_io_idx < n_disks_involved;
	     base_io_idx++, disk_idx++) {
		uint64_t start_offset_in_disk;
		uint64_t end_offset_in_disk;

		output = &g_io_output[base_io_idx];

		/* wrap disk_idx around */
		if (disk_idx >= num_base_drives) {
			disk_idx %= num_base_drives;
		}

		/* Strip alignment of start_offset_in_disk:
		 * the first base IO has the same start_offset_in_strip as the whole
		 * raid IO; every other base IO must start on a strip boundary
		 * (offset 0 within the strip).
		 */
		start_offset_in_disk = output->offset_blocks;
		if (base_io_idx == 0) {
			CU_ASSERT(start_offset_in_disk % g_strip_size == start_offset_in_strip);
		} else {
			CU_ASSERT(start_offset_in_disk % g_strip_size == 0);
		}

		/* Strip alignment of end_offset_in_disk:
		 * the base IO on the disk holding end_strip has the same
		 * end_offset_in_strip as the whole raid IO; every other base IO must
		 * end on a strip boundary (last block of the strip).
		 */
		end_offset_in_disk = output->offset_blocks + output->num_blocks - 1;
		if (disk_idx == end_strip_disk_idx) {
			CU_ASSERT(end_offset_in_disk % g_strip_size == end_offset_in_strip);
		} else {
			CU_ASSERT(end_offset_in_disk % g_strip_size == g_strip_size - 1);
		}

		/* Compare start_offset_in_disk with the start disk's offset:
		 * 1. A disk_idx greater than start_strip_disk_idx must not start at a
		 *    larger offset than the start disk, and the gap must be less than
		 *    the strip size.
		 * 2. A disk_idx less than start_strip_disk_idx must start at a larger
		 *    offset than the start disk, and the gap must not exceed the
		 *    strip size.
		 */
		if (disk_idx > start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk <= offset_in_start_disk);
			CU_ASSERT(offset_in_start_disk - start_offset_in_disk < g_strip_size);
		} else if (disk_idx < start_strip_disk_idx) {
			CU_ASSERT(start_offset_in_disk > offset_in_start_disk);
			CU_ASSERT(output->offset_blocks - offset_in_start_disk <= g_strip_size);
		}

		/* num_blocks compared with the start disk's:
		 * the difference must be within one strip size.
		 */
		if (output->num_blocks <= nblocks_in_start_disk) {
			CU_ASSERT(nblocks_in_start_disk - output->num_blocks <= g_strip_size);
		} else {
			CU_ASSERT(output->num_blocks - nblocks_in_start_disk < g_strip_size);
		}

		sum_nblocks += output->num_blocks;

		CU_ASSERT(raid_bdev_channel_get_base_channel(raid_io->raid_ch, disk_idx) == output->ch);
		CU_ASSERT(raid_bdev->base_bdev_info[disk_idx].desc == output->desc);
		CU_ASSERT(raid_io->type == output->iotype);
	}

	/* The sum of the base IO block counts must equal the raid IO block count */
	CU_ASSERT(raid_io->num_blocks == sum_nblocks);

	CU_ASSERT(g_io_comp_status == io_status);
}

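/* Create a RAID0 bdev from the global test parameters; when DIF is enabled,
 * configure Type 1 protection with separate metadata on the raid bdev and
 * mirror the settings onto every base bdev.
 */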
static struct raid_bdev *
create_raid0(void)
{
	struct raid_bdev *raid_bdev;
	struct raid_base_bdev_info *base_info;
	struct raid_params params = {
		.num_base_bdevs = g_max_base_drives,
		.base_bdev_blockcnt = BLOCK_CNT,
		.base_bdev_blocklen = g_block_len,
		.strip_size = g_strip_size,
		.md_type = g_enable_dif ? RAID_PARAMS_MD_SEPARATE : RAID_PARAMS_MD_NONE,
	};

	raid_bdev = raid_test_create_raid_bdev(&params, &g_raid0_module);

	SPDK_CU_ASSERT_FATAL(raid0_start(raid_bdev) == 0);

	if (g_enable_dif) {
		raid_bdev->bdev.dif_type = SPDK_DIF_TYPE1;
		raid_bdev->bdev.dif_check_flags =
			SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK |
			SPDK_DIF_FLAGS_APPTAG_CHECK;

		RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(base_info->desc);

			bdev->dif_type = raid_bdev->bdev.dif_type;
			bdev->dif_check_flags = raid_bdev->bdev.dif_check_flags;
		}
	}

	return raid_bdev;
}

static void
delete_raid0(struct raid_bdev *raid_bdev)
{
	raid_test_delete_raid_bdev(raid_bdev);
}

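/* Submit writes of half a strip and of a full strip and verify the resulting
 * child IOs.
 */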
static void
test_write_io(void)
{
	struct raid_bdev *raid_bdev;
	uint8_t i;
	uint64_t io_len;
	uint64_t lba = 0;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;

	set_globals();

	raid_bdev = create_raid0();
	raid_ch = raid_test_create_io_channel(raid_bdev);

	/* test 2 IO sizes based on global strip size set earlier */
	for (i = 0; i < 2; i++) {
		raid_io = calloc(1, sizeof(*raid_io));
		SPDK_CU_ASSERT_FATAL(raid_io != NULL);
		io_len = (g_strip_size / 2) << i;
		raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		generate_dif(raid_io->iovs, raid_io->iovcnt, raid_io->md_buf,
			     raid_io->offset_blocks, raid_io->num_blocks, &raid_bdev->bdev);
		raid0_submit_rw_request(raid_io);
		verify_io(raid_io, g_child_io_status_flag);
		raid_io_cleanup(raid_io);
	}

	raid_test_destroy_io_channel(raid_ch);
	delete_raid0(raid_bdev);

	reset_globals();
}

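/* Same as test_write_io, but for reads; DIF generation happens in the read
 * stub rather than up front.
 */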
static void
test_read_io(void)
{
	struct raid_bdev *raid_bdev;
	uint8_t i;
	uint64_t io_len;
	uint64_t lba = 0;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;

	set_globals();

	raid_bdev = create_raid0();
	raid_ch = raid_test_create_io_channel(raid_bdev);

	/* test 2 IO sizes based on global strip size set earlier */
	lba = 0;
	for (i = 0; i < 2; i++) {
		raid_io = calloc(1, sizeof(*raid_io));
		SPDK_CU_ASSERT_FATAL(raid_io != NULL);
		io_len = (g_strip_size / 2) << i;
		raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		raid0_submit_rw_request(raid_io);
		verify_io(raid_io, g_child_io_status_flag);
		raid_io_cleanup(raid_io);
	}

	raid_test_destroy_io_channel(raid_ch);
	delete_raid0(raid_bdev);

	reset_globals();
}

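/* For a given strip count, generate IO ranges covering 3 start offsets within
 * the first strip, 3 end offsets within the last strip and 3 starting base
 * bdevs: up to 27 ranges per call.
 */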
static void
raid_bdev_io_generate_by_strips(uint64_t n_strips)
{
	uint64_t lba;
	uint64_t nblocks;
	uint64_t start_offset;
	uint64_t end_offset;
	uint64_t offsets_in_strip[3];
	uint64_t start_bdev_idx;
	uint64_t start_bdev_offset;
	uint64_t start_bdev_idxs[3];
	int i, j, l;

	/* 3 different situations of offset in strip */
	offsets_in_strip[0] = 0;
	offsets_in_strip[1] = g_strip_size >> 1;
	offsets_in_strip[2] = g_strip_size - 1;

	/* 3 different situations of start_bdev_idx */
	start_bdev_idxs[0] = 0;
	start_bdev_idxs[1] = g_max_base_drives >> 1;
	start_bdev_idxs[2] = g_max_base_drives - 1;

	/* consider different offsets within the strip */
	for (i = 0; i < 3; i++) {
		start_offset = offsets_in_strip[i];
		for (j = 0; j < 3; j++) {
			end_offset = offsets_in_strip[j];
			if (n_strips == 1 && start_offset > end_offset) {
				continue;
			}

			/* consider at which base bdev the LBA starts */
			for (l = 0; l < 3; l++) {
				start_bdev_idx = start_bdev_idxs[l];
				start_bdev_offset = start_bdev_idx * g_strip_size;
				lba = start_bdev_offset + start_offset;
				nblocks = (n_strips - 1) * g_strip_size + end_offset - start_offset + 1;

				SPDK_CU_ASSERT_FATAL(g_io_range_idx < MAX_TEST_IO_RANGE);
				g_io_ranges[g_io_range_idx].lba = lba;
				g_io_ranges[g_io_range_idx].nblocks = nblocks;
				g_io_range_idx++;
			}
		}
	}
}

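/* Generate the full set of test IO ranges: every strip count from 1 up to
 * (but not including) the base bdev span, then several counts beyond the
 * span.
 */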
static void
raid_bdev_io_generate(void)
{
	uint64_t n_strips;
	uint64_t n_strips_span = g_max_base_drives;
	uint64_t n_strips_times[5] = {g_max_base_drives + 1, g_max_base_drives * 2 - 1,
				      g_max_base_drives * 2, g_max_base_drives * 3,
				      g_max_base_drives * 4
				     };
	uint32_t i;

	g_io_range_idx = 0;

	/* consider strip counts from 1 up to the base bdev span, and beyond
	 * that, multiples of the span
	 */
	for (n_strips = 1; n_strips < n_strips_span; n_strips++) {
		raid_bdev_io_generate_by_strips(n_strips);
	}

	for (i = 0; i < SPDK_COUNTOF(n_strips_times); i++) {
		n_strips = n_strips_times[i];
		raid_bdev_io_generate_by_strips(n_strips);
	}
}

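/* Submit unmap requests over the generated IO ranges and verify the
 * per-base-bdev splitting.
 */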
static void
test_unmap_io(void)
{
	struct raid_bdev *raid_bdev;
	uint32_t count;
	uint64_t io_len;
	uint64_t lba;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;

	set_globals();

	raid_bdev = create_raid0();
	raid_ch = raid_test_create_io_channel(raid_bdev);

	raid_bdev_io_generate();
	for (count = 0; count < g_io_range_idx; count++) {
		raid_io = calloc(1, sizeof(*raid_io));
		SPDK_CU_ASSERT_FATAL(raid_io != NULL);
		io_len = g_io_ranges[count].nblocks;
		lba = g_io_ranges[count].lba;
		raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, io_len, SPDK_BDEV_IO_TYPE_UNMAP);
		memset(g_io_output, 0, g_max_base_drives * sizeof(struct io_output));
		g_io_output_index = 0;
		raid0_submit_null_payload_request(raid_io);
		verify_io_without_payload(raid_io, g_child_io_status_flag);
		raid_io_cleanup(raid_io);
	}

	raid_test_destroy_io_channel(raid_ch);
	delete_raid0(raid_bdev);

	reset_globals();
}

/* Test IO failures */
static void
test_io_failure(void)
{
	struct raid_bdev *raid_bdev;
	uint32_t count;
	uint64_t io_len;
	uint64_t lba;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;

	set_globals();

	raid_bdev = create_raid0();
	raid_ch = raid_test_create_io_channel(raid_bdev);

	lba = 0;
	g_child_io_status_flag = false;
	for (count = 0; count < 1; count++) {
		raid_io = calloc(1, sizeof(*raid_io));
		SPDK_CU_ASSERT_FATAL(raid_io != NULL);
		io_len = (g_strip_size / 2) << count;
		raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
		lba += g_strip_size;
		memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
		g_io_output_index = 0;
		generate_dif(raid_io->iovs, raid_io->iovcnt, raid_io->md_buf,
			     raid_io->offset_blocks, raid_io->num_blocks, &raid_bdev->bdev);
		raid0_submit_rw_request(raid_io);
		verify_io(raid_io, g_child_io_status_flag);
		raid_io_cleanup(raid_io);
	}

	raid_test_destroy_io_channel(raid_ch);
	delete_raid0(raid_bdev);

	reset_globals();
}

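/* Run the test list twice: once with DIF disabled and once with DIF enabled */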
int
main(int argc, char **argv)
{
	unsigned int    num_failures;

	CU_TestInfo tests[] = {
		{ "test_write_io", test_write_io },
		{ "test_read_io", test_read_io },
		{ "test_unmap_io", test_unmap_io },
		{ "test_io_failure", test_io_failure },
		CU_TEST_INFO_NULL,
	};
	CU_SuiteInfo suites[] = {
		{ "raid0", set_test_opts, NULL, NULL, NULL, tests },
		{ "raid0_dif", set_test_opts_dif, NULL, NULL, NULL, tests },
		CU_SUITE_INFO_NULL,
	};

	CU_initialize_registry();
	CU_register_suites(suites);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}