#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/env.h"
#include "thread/thread_internal.h"
#include "spdk_internal/mock.h"

#include "bdev/raid/bdev_raid.h"
#include "bdev/raid/concat.c"

#define BLOCK_LEN (4096)

enum CONCAT_IO_TYPE {
	CONCAT_NONE = 0,
	CONCAT_WRITEV,
	CONCAT_READV,
	CONCAT_FLUSH,
	CONCAT_UNMAP,
};

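/*
 * Minimal local definition of the otherwise opaque struct spdk_bdev_desc,
 * just enough for these tests to allocate a descriptor per base bdev without
 * pulling in the bdev library.
 */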
struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};

#define MAX_RECORDS (10)
/*
 * Store information about the IO requests sent to the underlying bdevs.
 * A single null-payload request to the concat bdev may fan out into
 * multiple requests to the underlying bdevs, so the request information
 * is recorded in arrays.
 */
struct req_records {
	uint64_t offset_blocks[MAX_RECORDS];
	uint64_t num_blocks[MAX_RECORDS];
	enum CONCAT_IO_TYPE io_type[MAX_RECORDS];
	int count;
	void *md;
} g_req_records;

/*
 * When g_succeed is true, the stubbed spdk_bdev_readv/writev/unmap/flush_blocks
 * functions return 0; when it is false, they return -ENOMEM.
 * g_succeed is set to false before each IO request so that the first submission
 * fails; the stubbed raid_bdev_queue_io_wait then sets g_succeed to true and
 * re-submits the request, which succeeds on the retry.
 */
bool g_succeed;

DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
				      enum spdk_bdev_io_status status));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(raid_bdev_io_complete_part, bool,
	    (struct raid_bdev_io *raid_io, uint64_t completed,
	     enum spdk_bdev_io_status status),
	    true);

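/*
 * Small helper shared by the bdev IO stubs below: record one request sent to
 * a base bdev, or record nothing and return false while g_succeed is false,
 * so that every stub takes the same -ENOMEM retry path. The bounds assertion
 * is a defensive addition; the tests in this file record at most two requests
 * per submission.
 */
static bool
record_req(enum CONCAT_IO_TYPE io_type, uint64_t offset_blocks, uint64_t num_blocks)
{
	int i = g_req_records.count;

	if (!g_succeed) {
		return false;
	}

	SPDK_CU_ASSERT_FATAL(i < MAX_RECORDS);
	g_req_records.offset_blocks[i] = offset_blocks;
	g_req_records.num_blocks[i] = num_blocks;
	g_req_records.io_type[i] = io_type;
	g_req_records.count++;
	return true;
}
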
int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
{
	if (!record_req(CONCAT_READV, offset_blocks, num_blocks)) {
		return -ENOMEM;
	}

	g_req_records.md = opts->metadata;
	cb(NULL, true, cb_arg);
	return 0;
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
{
	if (!record_req(CONCAT_WRITEV, offset_blocks, num_blocks)) {
		return -ENOMEM;
	}

	g_req_records.md = opts->metadata;
	cb(NULL, true, cb_arg);
	return 0;
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (!record_req(CONCAT_UNMAP, offset_blocks, num_blocks)) {
		return -ENOMEM;
	}

	cb(NULL, true, cb_arg);
	return 0;
}

int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (!record_req(CONCAT_FLUSH, offset_blocks, num_blocks)) {
		return -ENOMEM;
	}

	cb(NULL, true, cb_arg);
	return 0;
}

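/*
 * Instead of queueing the IO to wait for resources, flip g_succeed to true
 * and retry immediately, so the -ENOMEM path is exercised exactly once per
 * submission.
 */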
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	g_succeed = true;
	cb_fn(raid_io);
}

static void
init_globals(void)
{
	int i;

	for (i = 0; i < MAX_RECORDS; i++) {
		g_req_records.offset_blocks[i] = 0;
		g_req_records.num_blocks[i] = 0;
		g_req_records.io_type[i] = CONCAT_NONE;
	}
	g_req_records.count = 0;
	g_req_records.md = NULL;
	g_succeed = false;
}

struct concat_params {
	uint8_t num_base_bdevs;
	uint64_t base_bdev_blockcnt;
	uint32_t base_bdev_blocklen;
	uint32_t strip_size;
};

static struct concat_params *g_params;
static size_t g_params_count;

#define ARRAY_FOR_EACH(a, e) \
	for (e = a; e < a + SPDK_COUNTOF(a); e++)

#define CONCAT_PARAMS_FOR_EACH(p) \
	for (p = g_params; p < g_params + g_params_count; p++)

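/*
 * Build the test parameter matrix as the cartesian product of the value
 * lists below, dropping any combination whose strip size in blocks would be
 * zero or larger than a single base bdev.
 */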
static int
test_setup(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	struct concat_params *params;

	g_params_count = SPDK_COUNTOF(num_base_bdevs_values) *
			 SPDK_COUNTOF(base_bdev_blockcnt_values) *
			 SPDK_COUNTOF(base_bdev_blocklen_values) *
			 SPDK_COUNTOF(strip_size_kb_values);
	g_params = calloc(g_params_count, sizeof(*g_params));
	if (!g_params) {
		return -ENOMEM;
	}

	params = g_params;

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					params->num_base_bdevs = *num_base_bdevs;
					params->base_bdev_blockcnt = *base_bdev_blockcnt;
					params->base_bdev_blocklen = *base_bdev_blocklen;
					params->strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
					if (params->strip_size == 0 ||
					    params->strip_size > *base_bdev_blockcnt) {
						g_params_count--;
						continue;
					}
					params++;
				}
			}
		}
	}

	return 0;
}

static int
test_cleanup(void)
{
	free(g_params);
	return 0;
}

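/*
 * Hand-assemble a raid_bdev for the given parameters, bypassing the raid
 * framework; only the fields these tests rely on are populated.
 */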
static struct raid_bdev *
create_raid_bdev(struct concat_params *params)
{
	struct raid_bdev *raid_bdev;
	struct raid_base_bdev_info *base_info;

	raid_bdev = calloc(1, sizeof(*raid_bdev));
	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);

	raid_bdev->module = &g_concat_module;
	raid_bdev->num_base_bdevs = params->num_base_bdevs;
	raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs,
					   sizeof(struct raid_base_bdev_info));
	SPDK_CU_ASSERT_FATAL(raid_bdev->base_bdev_info != NULL);

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		base_info->bdev = calloc(1, sizeof(*base_info->bdev));
		SPDK_CU_ASSERT_FATAL(base_info->bdev != NULL);
		base_info->desc = calloc(1, sizeof(*base_info->desc));
		SPDK_CU_ASSERT_FATAL(base_info->desc != NULL);

		base_info->bdev->blockcnt = params->base_bdev_blockcnt;
		base_info->bdev->blocklen = params->base_bdev_blocklen;
	}

	raid_bdev->strip_size = params->strip_size;
	raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
	raid_bdev->bdev.blocklen = params->base_bdev_blocklen;

	return raid_bdev;
}

static void
delete_raid_bdev(struct raid_bdev *raid_bdev)
{
	struct raid_base_bdev_info *base_info;

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		free(base_info->bdev);
		free(base_info->desc);
	}
	free(raid_bdev->base_bdev_info);
	free(raid_bdev);
}

static struct raid_bdev *
create_concat(struct concat_params *params)
{
	struct raid_bdev *raid_bdev = create_raid_bdev(params);

	CU_ASSERT(concat_start(raid_bdev) == 0);
	return raid_bdev;
}

static void
delete_concat(struct raid_bdev *raid_bdev)
{
	concat_stop(raid_bdev);
	delete_raid_bdev(raid_bdev);
}

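/*
 * concat_start() should lay the base bdevs out back to back: each block
 * range begins where the previous one ended.
 */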
static void
test_concat_start(void)
{
	struct raid_bdev *raid_bdev;
	struct concat_params *params;
	struct concat_block_range *block_range;
	uint64_t total_blockcnt;
	int i;

	CONCAT_PARAMS_FOR_EACH(params) {
		raid_bdev = create_concat(params);
		block_range = raid_bdev->module_private;
		total_blockcnt = 0;
		for (i = 0; i < params->num_base_bdevs; i++) {
			CU_ASSERT(block_range[i].start == total_blockcnt);
			CU_ASSERT(block_range[i].length == params->base_bdev_blockcnt);
			total_blockcnt += params->base_bdev_blockcnt;
		}
		delete_concat(raid_bdev);
	}
}

static void
bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->u.bdev.iovs) {
		free(bdev_io->u.bdev.iovs->iov_base);
		free(bdev_io->u.bdev.iovs);
	}

	if (bdev_io->u.bdev.ext_opts) {
		/* metadata points at a sentinel value, so there is nothing to free */
		free(bdev_io->u.bdev.ext_opts);
	}
	free(bdev_io);
}

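/*
 * Fill in the bdev_io fields these tests use. Read/write requests also get a
 * single-iovec payload and ext_opts carrying a sentinel metadata pointer,
 * which the stubs record so the tests can verify it is passed through.
 */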
static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
		return;
	}

	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
	bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * BLOCK_LEN);
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
	bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_LEN;
	bdev_io->internal.ch = channel;
	bdev_io->u.bdev.ext_opts = calloc(1, sizeof(struct spdk_bdev_ext_io_opts));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.ext_opts != NULL);
	bdev_io->u.bdev.ext_opts->metadata = (void *)0xAEDFEBAC;
}

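/*
 * Submit a single-block request to the first LBA of each base bdev in turn
 * and verify that exactly one request with the expected offset, length,
 * type, and metadata pointer reaches the corresponding stub.
 */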
static void
submit_and_verify_rw(enum CONCAT_IO_TYPE io_type, struct concat_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;
	int i;

	lba = 0;
	blocks = 1;
	for (i = 0; i < params->num_base_bdevs; i++) {
		init_globals();
		raid_bdev = create_concat(params);
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
		raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
		raid_ch->base_channel = calloc(params->num_base_bdevs,
					       sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
		raid_io->raid_ch = raid_ch;
		raid_io->raid_bdev = raid_bdev;
		ch = calloc(1, sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(ch != NULL);

		switch (io_type) {
		case CONCAT_WRITEV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_READV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_UNMAP:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
			concat_submit_null_payload_request(raid_io);
			break;
		case CONCAT_FLUSH:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
			concat_submit_null_payload_request(raid_io);
			break;
		default:
			CU_ASSERT(false);
		}

		/*
		 * The request targets the first LBA of each underlying bdev,
		 * so the offset on the underlying bdev should always be 0.
		 */
		CU_ASSERT(g_req_records.offset_blocks[0] == 0);
		CU_ASSERT(g_req_records.num_blocks[0] == blocks);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.count == 1);
		CU_ASSERT(g_req_records.md == (void *)0xAEDFEBAC);
		bdev_io_cleanup(bdev_io);
		free(ch);
		free(raid_ch->base_channel);
		free(raid_ch);
		delete_concat(raid_bdev);
		lba += params->base_bdev_blockcnt;
	}
}

static void
test_concat_rw(void)
{
	struct concat_params *params;
	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_WRITEV, CONCAT_READV};
	enum CONCAT_IO_TYPE io_type;
	size_t i;

	CONCAT_PARAMS_FOR_EACH(params) {
		for (i = 0; i < SPDK_COUNTOF(io_type_list); i++) {
			io_type = io_type_list[i];
			submit_and_verify_rw(io_type, params);
		}
	}
}

static void
submit_and_verify_null_payload(enum CONCAT_IO_TYPE io_type, struct concat_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;

	/*
	 * In this unit test, all base bdevs have the same blockcnt.
	 * If base_bdev_blockcnt > 1, the request starts on the second bdev
	 * and spans two bdevs.
	 * If base_bdev_blockcnt == 1, the request starts on the third bdev.
	 * In that case, with only 3 bdevs, blocks cannot be set to
	 * base_bdev_blockcnt + 1 because the request would run past the end
	 * of the last bdev, so blocks is set to 1.
	 */
	lba = params->base_bdev_blockcnt + 1;
	if (params->base_bdev_blockcnt == 1 && params->num_base_bdevs == 3) {
		blocks = 1;
	} else {
		blocks = params->base_bdev_blockcnt + 1;
	}
	init_globals();
	raid_bdev = create_concat(params);
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
	raid_ch->base_channel = calloc(params->num_base_bdevs,
				       sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
	raid_io->raid_ch = raid_ch;
	raid_io->raid_bdev = raid_bdev;
	ch = calloc(1, sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	switch (io_type) {
	case CONCAT_UNMAP:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
		concat_submit_null_payload_request(raid_io);
		break;
	case CONCAT_FLUSH:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
		concat_submit_null_payload_request(raid_io);
		break;
	default:
		CU_ASSERT(false);
	}

	if (params->base_bdev_blockcnt == 1) {
		if (params->num_base_bdevs == 3) {
			CU_ASSERT(g_req_records.count == 1);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
		} else {
			CU_ASSERT(g_req_records.count == 2);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
			CU_ASSERT(g_req_records.io_type[0] == io_type);
			CU_ASSERT(g_req_records.offset_blocks[1] == 0);
			CU_ASSERT(g_req_records.num_blocks[1] == 1);
			CU_ASSERT(g_req_records.io_type[1] == io_type);
		}
	} else {
		CU_ASSERT(g_req_records.count == 2);
		CU_ASSERT(g_req_records.offset_blocks[0] == 1);
		CU_ASSERT(g_req_records.num_blocks[0] == params->base_bdev_blockcnt - 1);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.offset_blocks[1] == 0);
		CU_ASSERT(g_req_records.num_blocks[1] == 2);
		CU_ASSERT(g_req_records.io_type[1] == io_type);
	}
	bdev_io_cleanup(bdev_io);
	free(ch);
	free(raid_ch->base_channel);
	free(raid_ch);
	delete_concat(raid_bdev);
}

static void
test_concat_null_payload(void)
{
	struct concat_params *params;
	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_FLUSH, CONCAT_UNMAP};
	enum CONCAT_IO_TYPE io_type;
	size_t i;

	CONCAT_PARAMS_FOR_EACH(params) {
		for (i = 0; i < SPDK_COUNTOF(io_type_list); i++) {
			io_type = io_type_list[i];
			submit_and_verify_null_payload(io_type, params);
		}
	}
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("concat", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_concat_start);
	CU_ADD_TEST(suite, test_concat_rw);
	CU_ADD_TEST(suite, test_concat_null_payload);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}