xref: /spdk/test/unit/lib/bdev/raid/concat.c/concat_ut.c (revision 7506a7aa53d239f533af3bc768f0d2af55e735fe)
1 #include "spdk/stdinc.h"
2 #include "spdk_cunit.h"
3 #include "spdk/env.h"
4 #include "thread/thread_internal.h"
5 #include "spdk_internal/mock.h"
6 
7 #include "bdev/raid/bdev_raid.h"
8 #include "bdev/raid/concat.c"
9 
/* Block size (bytes) used when allocating payload buffers for mocked IOs. */
#define BLOCK_LEN (4096)

/* Tags which mocked spdk_bdev_*_blocks function recorded a forwarded request. */
enum CONCAT_IO_TYPE {
	CONCAT_NONE = 0,
	CONCAT_WRITEV,
	CONCAT_READV,
	CONCAT_FLUSH,
	CONCAT_UNMAP,
};
19 
/*
 * Minimal stand-in for the opaque bdev descriptor type so the code under
 * test can be linked without the real bdev layer.
 */
struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};
23 
#define MAX_RECORDS (10)
/*
 * Store the information of io requests sent to the underlying bdevs.
 * For a single null payload request to the concat bdev,
 * we may send multiple requests to the underlying bdevs,
 * so we store the io request information to arrays.
 */
struct req_records {
	uint64_t offset_blocks[MAX_RECORDS];		/* start LBA on the base bdev, per request */
	uint64_t num_blocks[MAX_RECORDS];		/* length in blocks, per request */
	enum CONCAT_IO_TYPE io_type[MAX_RECORDS];	/* which mocked API call recorded it */
	int count;					/* number of valid entries above */
} g_req_records;
37 
/*
 * g_succeed is true means the spdk_bdev_readv/writev/unmap/flush_blocks
 * functions will return 0.
 * g_succeed is false means the spdk_bdev_readv/writev/unmap/flush_blocks
 * functions will return -ENOMEM.
 * We always set it to false before an IO request, then the raid_bdev_queue_io_wait
 * function will re-submit the request, and the raid_bdev_queue_io_wait function will
 * set g_succeed to true, then the IO will succeed next time.
 */
bool g_succeed;
48 
/* Stubs for raid framework callbacks invoked by the concat module under test. */
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
				      enum spdk_bdev_io_status status));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
/* Always report "all parts complete" so null-payload IOs finish in one pass. */
DEFINE_STUB(raid_bdev_io_complete_part, bool,
	    (struct raid_bdev_io *raid_io, uint64_t completed,
	     enum spdk_bdev_io_status status),
	    true);
57 
58 int
59 spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
60 		       struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
61 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
62 {
63 	if (g_succeed) {
64 		int i = g_req_records.count;
65 
66 		g_req_records.offset_blocks[i] = offset_blocks;
67 		g_req_records.num_blocks[i] = num_blocks;
68 		g_req_records.io_type[i] = CONCAT_READV;
69 		g_req_records.count++;
70 		cb(NULL, true, cb_arg);
71 		return 0;
72 	} else {
73 		return -ENOMEM;
74 	}
75 }
76 
77 int
78 spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
79 			struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
80 			spdk_bdev_io_completion_cb cb, void *cb_arg)
81 {
82 	if (g_succeed) {
83 		int i = g_req_records.count;
84 
85 		g_req_records.offset_blocks[i] = offset_blocks;
86 		g_req_records.num_blocks[i] = num_blocks;
87 		g_req_records.io_type[i] = CONCAT_WRITEV;
88 		g_req_records.count++;
89 		cb(NULL, true, cb_arg);
90 		return 0;
91 	} else {
92 		return -ENOMEM;
93 	}
94 }
95 
96 int
97 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
98 		       uint64_t offset_blocks, uint64_t num_blocks,
99 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
100 {
101 	if (g_succeed) {
102 		int i = g_req_records.count;
103 
104 		g_req_records.offset_blocks[i] = offset_blocks;
105 		g_req_records.num_blocks[i] = num_blocks;
106 		g_req_records.io_type[i] = CONCAT_UNMAP;
107 		g_req_records.count++;
108 		cb(NULL, true, cb_arg);
109 		return 0;
110 	} else {
111 		return -ENOMEM;
112 	}
113 }
114 
115 int
116 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
117 		       uint64_t offset_blocks, uint64_t num_blocks,
118 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
119 {
120 	if (g_succeed) {
121 		int i = g_req_records.count;
122 
123 		g_req_records.offset_blocks[i] = offset_blocks;
124 		g_req_records.num_blocks[i] = num_blocks;
125 		g_req_records.io_type[i] = CONCAT_FLUSH;
126 		g_req_records.count++;
127 		cb(NULL, true, cb_arg);
128 		return 0;
129 	} else {
130 		return -ENOMEM;
131 	}
132 }
133 
/*
 * Mock of the raid framework's queue-on-ENOMEM path.  Instead of actually
 * waiting, flip g_succeed so the retried submission succeeds, then invoke
 * the retry callback immediately.  g_succeed must be set before cb_fn(),
 * because the callback re-submits the IO and the mocked submission
 * functions read g_succeed.
 */
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	g_succeed = true;
	cb_fn(raid_io);
}
141 
142 static void
143 init_globals(void)
144 {
145 	int i;
146 
147 	for (i = 0; i < MAX_RECORDS; i++) {
148 		g_req_records.offset_blocks[i] = 0;
149 		g_req_records.num_blocks[i] = 0;
150 		g_req_records.io_type[i] = CONCAT_NONE;
151 	}
152 	g_req_records.count = 0;
153 	g_succeed = false;
154 }
155 
/* One combination of raid geometry parameters to exercise. */
struct concat_params {
	uint8_t num_base_bdevs;
	uint64_t base_bdev_blockcnt;	/* blocks per base bdev */
	uint32_t base_bdev_blocklen;	/* block size in bytes */
	uint32_t strip_size;		/* strip size in blocks */
};

/* Parameter table generated by test_setup(), freed by test_cleanup(). */
static struct concat_params *g_params;
static size_t g_params_count;

/* Iterate pointer e over every element of fixed-size array a. */
#define ARRAY_FOR_EACH(a, e) \
	for (e = a; e < a + SPDK_COUNTOF(a); e++)

/* Iterate pointer p over every generated parameter combination. */
#define CONCAT_PARAMS_FOR_EACH(p) \
	for (p = g_params; p < g_params + g_params_count; p++)
171 
172 static int
173 test_setup(void)
174 {
175 	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
176 	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
177 	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
178 	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
179 	uint8_t *num_base_bdevs;
180 	uint64_t *base_bdev_blockcnt;
181 	uint32_t *base_bdev_blocklen;
182 	uint32_t *strip_size_kb;
183 	struct concat_params *params;
184 
185 	g_params_count = SPDK_COUNTOF(num_base_bdevs_values) *
186 			 SPDK_COUNTOF(base_bdev_blockcnt_values) *
187 			 SPDK_COUNTOF(base_bdev_blocklen_values) *
188 			 SPDK_COUNTOF(strip_size_kb_values);
189 	g_params = calloc(g_params_count, sizeof(*g_params));
190 	if (!g_params) {
191 		return -ENOMEM;
192 	}
193 
194 	params = g_params;
195 
196 	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
197 		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
198 			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
199 				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
200 					params->num_base_bdevs = *num_base_bdevs;
201 					params->base_bdev_blockcnt = *base_bdev_blockcnt;
202 					params->base_bdev_blocklen = *base_bdev_blocklen;
203 					params->strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
204 					if (params->strip_size == 0 ||
205 					    params->strip_size > *base_bdev_blockcnt) {
206 						g_params_count--;
207 						continue;
208 					}
209 					params++;
210 				}
211 			}
212 		}
213 	}
214 
215 	return 0;
216 }
217 
218 static int
219 test_cleanup(void)
220 {
221 	free(g_params);
222 	return 0;
223 }
224 
225 static struct raid_bdev *
226 create_raid_bdev(struct concat_params *params)
227 {
228 	struct raid_bdev *raid_bdev;
229 	struct raid_base_bdev_info *base_info;
230 
231 	raid_bdev = calloc(1, sizeof(*raid_bdev));
232 	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
233 
234 	raid_bdev->module = &g_concat_module;
235 	raid_bdev->num_base_bdevs = params->num_base_bdevs;
236 	raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs,
237 					   sizeof(struct raid_base_bdev_info));
238 	SPDK_CU_ASSERT_FATAL(raid_bdev->base_bdev_info != NULL);
239 
240 	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
241 		base_info->bdev = calloc(1, sizeof(*base_info->bdev));
242 		SPDK_CU_ASSERT_FATAL(base_info->bdev != NULL);
243 		base_info->desc = calloc(1, sizeof(*base_info->desc));
244 		SPDK_CU_ASSERT_FATAL(base_info->desc != NULL);
245 
246 		base_info->bdev->blockcnt = params->base_bdev_blockcnt;
247 		base_info->bdev->blocklen = params->base_bdev_blocklen;
248 	}
249 
250 	raid_bdev->strip_size = params->strip_size;
251 	raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
252 	raid_bdev->bdev.blocklen = params->base_bdev_blocklen;
253 
254 	return raid_bdev;
255 }
256 
257 static void
258 delete_raid_bdev(struct raid_bdev *raid_bdev)
259 {
260 	struct raid_base_bdev_info *base_info;
261 
262 	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
263 		free(base_info->bdev);
264 		free(base_info->desc);
265 	}
266 	free(raid_bdev->base_bdev_info);
267 	free(raid_bdev);
268 }
269 
/* Create a mocked raid_bdev and start the concat module on it. */
static struct raid_bdev *
create_concat(struct concat_params *params)
{
	struct raid_bdev *raid_bdev;

	raid_bdev = create_raid_bdev(params);
	CU_ASSERT(concat_start(raid_bdev) == 0);

	return raid_bdev;
}
278 
/* Stop the concat module, then tear down the mocked raid_bdev. */
static void
delete_concat(struct raid_bdev *raid_bdev)
{
	concat_stop(raid_bdev);
	delete_raid_bdev(raid_bdev);
}
285 
286 static void
287 test_concat_start(void)
288 {
289 	struct raid_bdev *raid_bdev;
290 	struct concat_params *params;
291 	struct concat_block_range *block_range;
292 	uint64_t total_blockcnt;
293 	int i;
294 
295 	CONCAT_PARAMS_FOR_EACH(params) {
296 		raid_bdev = create_concat(params);
297 		block_range = raid_bdev->module_private;
298 		total_blockcnt = 0;
299 		for (i = 0; i < params->num_base_bdevs; i++) {
300 			CU_ASSERT(block_range[i].start == total_blockcnt);
301 			CU_ASSERT(block_range[i].length == params->base_bdev_blockcnt);
302 			total_blockcnt += params->base_bdev_blockcnt;
303 		}
304 		delete_concat(raid_bdev);
305 	}
306 }
307 
308 static void
309 bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
310 {
311 	if (bdev_io->u.bdev.iovs) {
312 		if (bdev_io->u.bdev.iovs->iov_base) {
313 			free(bdev_io->u.bdev.iovs->iov_base);
314 		}
315 		free(bdev_io->u.bdev.iovs);
316 	}
317 	free(bdev_io);
318 }
319 
320 static void
321 bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
322 		   uint64_t lba, uint64_t blocks, int16_t iotype)
323 {
324 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
325 
326 	bdev_io->bdev = bdev;
327 	bdev_io->u.bdev.offset_blocks = lba;
328 	bdev_io->u.bdev.num_blocks = blocks;
329 	bdev_io->type = iotype;
330 
331 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
332 		return;
333 	}
334 
335 	bdev_io->u.bdev.iovcnt = 1;
336 	bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
337 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
338 	bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * 4096);
339 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
340 	bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_LEN;
341 	bdev_io->internal.ch = channel;
342 }
343 
/*
 * Submit one single-block read or write whose LBA lands at the start of
 * each base bdev in turn, and verify that the concat module forwards it
 * as exactly one request to offset 0 of an underlying bdev.
 * init_globals() leaves g_succeed false, so the first submission fails
 * with -ENOMEM and exercises the raid_bdev_queue_io_wait() retry path.
 */
static void
submit_and_verify_rw(enum CONCAT_IO_TYPE io_type, struct concat_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;
	int i;

	lba = 0;
	blocks = 1;
	for (i = 0; i < params->num_base_bdevs; i++) {
		init_globals();
		raid_bdev = create_concat(params);
		/* raid_bdev_io lives in the driver_ctx tail of spdk_bdev_io. */
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
		raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
		raid_ch->base_channel = calloc(params->num_base_bdevs,
					       sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
		raid_io->raid_ch = raid_ch;
		raid_io->raid_bdev = raid_bdev;
		ch = calloc(1, sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(ch != NULL);

		switch (io_type) {
		case CONCAT_WRITEV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_READV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_UNMAP:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
			concat_submit_null_payload_request(raid_io);
			break;
		case CONCAT_FLUSH:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
			concat_submit_null_payload_request(raid_io);
			break;
		default:
			CU_ASSERT(false);
		}

		/*
		 * We submit request to the first lba of each underlying device,
		 * so the offset of the underlying device should always be 0.
		 */
		CU_ASSERT(g_req_records.offset_blocks[0] == 0);
		CU_ASSERT(g_req_records.num_blocks[0] == blocks);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.count == 1);
		bdev_io_cleanup(bdev_io);
		free(ch);
		free(raid_ch->base_channel);
		free(raid_ch);
		delete_concat(raid_bdev);
		/* Advance to the first block of the next base bdev. */
		lba += params->base_bdev_blockcnt;
	}
}
410 
411 static void
412 test_concat_rw(void)
413 {
414 	struct concat_params *params;
415 	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_WRITEV, CONCAT_READV};
416 	enum CONCAT_IO_TYPE io_type;
417 	int i;
418 
419 	CONCAT_PARAMS_FOR_EACH(params) {
420 		for (i = 0; i < 2; i ++) {
421 			io_type = io_type_list[i];
422 			submit_and_verify_rw(io_type, params);
423 		}
424 	}
425 }
426 
/*
 * Submit one UNMAP or FLUSH that crosses a base bdev boundary and verify
 * the concat module splits it into the expected per-bdev sub-requests
 * (offsets, lengths, types, and count recorded by the mocks).
 */
static void
submit_and_verify_null_payload(enum CONCAT_IO_TYPE io_type, struct concat_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;

	/*
	 * In this unittest, all base bdevs have the same blockcnt.
	 * If the base_bdev_blockcnt > 1, the request will start from
	 * the second bdev, and across two bdevs.
	 * If the base_bdev_blockcnt == 1, the request will start from
	 * the third bdev. In this case, if there are only 3 bdevs,
	 * we can not set blocks to base_bdev_blockcnt + 1 because the request
	 * will be beyond the end of the last bdev, so we set the blocks to 1
	 */
	lba = params->base_bdev_blockcnt + 1;
	if (params->base_bdev_blockcnt == 1 && params->num_base_bdevs == 3) {
		blocks = 1;
	} else {
		blocks = params->base_bdev_blockcnt + 1;
	}
	init_globals();
	raid_bdev = create_concat(params);
	/* raid_bdev_io lives in the driver_ctx tail of spdk_bdev_io. */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
	raid_ch->base_channel = calloc(params->num_base_bdevs,
				       sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
	raid_io->raid_ch = raid_ch;
	raid_io->raid_bdev = raid_bdev;
	ch = calloc(1, sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	switch (io_type) {
	case CONCAT_UNMAP:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
		concat_submit_null_payload_request(raid_io);
		break;
	case CONCAT_FLUSH:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
		concat_submit_null_payload_request(raid_io);
		break;
	default:
		CU_ASSERT(false);
	}

	if (params->base_bdev_blockcnt == 1) {
		if (params->num_base_bdevs == 3) {
			/* Single-block request fits entirely in the third bdev. */
			CU_ASSERT(g_req_records.count == 1);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
		} else {
			/* Two-block request spans the third and fourth bdevs, one block each. */
			CU_ASSERT(g_req_records.count == 2);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
			CU_ASSERT(g_req_records.io_type[0] == io_type);
			CU_ASSERT(g_req_records.offset_blocks[1] == 0);
			CU_ASSERT(g_req_records.num_blocks[1] == 1);
			CU_ASSERT(g_req_records.io_type[1] == io_type);
		}
	} else {
		/* Request covers the tail of the second bdev plus two blocks of the third. */
		CU_ASSERT(g_req_records.count == 2);
		CU_ASSERT(g_req_records.offset_blocks[0] == 1);
		CU_ASSERT(g_req_records.num_blocks[0] == params->base_bdev_blockcnt - 1);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.offset_blocks[1] == 0);
		CU_ASSERT(g_req_records.num_blocks[1] == 2);
		CU_ASSERT(g_req_records.io_type[1] == io_type);
	}
	bdev_io_cleanup(bdev_io);
	free(ch);
	free(raid_ch->base_channel);
	free(raid_ch);
	delete_concat(raid_bdev);
}
509 
510 static void
511 test_concat_null_payload(void)
512 {
513 	struct concat_params *params;
514 	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_FLUSH, CONCAT_UNMAP};
515 	enum CONCAT_IO_TYPE io_type;
516 	int i;
517 
518 	CONCAT_PARAMS_FOR_EACH(params) {
519 		for (i = 0; i < 2; i ++) {
520 			io_type = io_type_list[i];
521 			submit_and_verify_null_payload(io_type, params);
522 		}
523 	}
524 }
525 
/*
 * Register and run the concat test suite.  CUEA_ABORT makes CUnit abort
 * on framework errors (e.g. failed suite registration), so the return
 * values of the registry calls are intentionally not checked here.
 * Returns the number of failed assertions as the process exit code.
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	/* test_setup/test_cleanup run once around the whole suite. */
	suite = CU_add_suite("concat", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_concat_start);
	CU_ADD_TEST(suite, test_concat_rw);
	CU_ADD_TEST(suite, test_concat_null_payload);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
546