xref: /spdk/test/unit/lib/bdev/raid/concat.c/concat_ut.c (revision a8d21b9b550dde7d3e7ffc0cd1171528a136165f)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_cunit.h"
8 #include "spdk/env.h"
9 #include "thread/thread_internal.h"
10 #include "spdk_internal/mock.h"
11 
12 #include "bdev/raid/bdev_raid.h"
13 #include "bdev/raid/concat.c"
14 #include "../common.c"
15 
/*
 * Stubbed *_with_md read/write variants: the concat module under test only
 * exercises the *_ext submission paths below, so these simply report success.
 */
DEFINE_STUB(spdk_bdev_readv_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, void *md,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, void *md,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
26 
/* Logical block size (bytes) used when sizing test data buffers. */
#define BLOCK_LEN (4096)

/* Tags recorded by the stubbed submission functions to identify the IO type
 * that reached the base bdev layer. */
enum CONCAT_IO_TYPE {
	CONCAT_NONE = 0,
	CONCAT_WRITEV,
	CONCAT_READV,
	CONCAT_FLUSH,
	CONCAT_UNMAP,
};
36 
#define MAX_RECORDS (10)
/*
 * Store the information of io requests sent to the underlying bdevs.
 * For a single null payload request to the concat bdev,
 * we may send multiple requests to the underlying bdevs,
 * so we store the io request information to arrays.
 */
struct req_records {
	uint64_t offset_blocks[MAX_RECORDS];		/* start LBA of each forwarded request */
	uint64_t num_blocks[MAX_RECORDS];		/* block count of each forwarded request */
	enum CONCAT_IO_TYPE io_type[MAX_RECORDS];	/* IO type of each forwarded request */
	int count;					/* number of valid entries recorded */
	void *md;					/* metadata pointer from the last *_ext call */
} g_req_records;
51 
/*
 * When g_succeed is true, the stubbed spdk_bdev_readv/writev/unmap/flush_blocks
 * functions return 0; when it is false, they return -ENOMEM.
 * Each test sets it to false before submitting an IO request so the first
 * submission fails, which drives the code into raid_bdev_queue_io_wait().
 * Our raid_bdev_queue_io_wait() stub then sets g_succeed to true and
 * re-submits, so the IO succeeds on the retry.
 */
bool g_succeed;
62 
/* Raid framework callbacks invoked by concat.c; stubbed as no-ops so the
 * module code can run in isolation. */
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
				      enum spdk_bdev_io_status status));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(raid_bdev_io_complete_part, bool,
	    (struct raid_bdev_io *raid_io, uint64_t completed,
	     enum spdk_bdev_io_status status),
	    true);
71 
72 int
73 spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
74 			   struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
75 			   spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
76 {
77 	if (g_succeed) {
78 		int i = g_req_records.count;
79 
80 		g_req_records.offset_blocks[i] = offset_blocks;
81 		g_req_records.num_blocks[i] = num_blocks;
82 		g_req_records.io_type[i] = CONCAT_READV;
83 		g_req_records.count++;
84 		cb(NULL, true, cb_arg);
85 		g_req_records.md = opts->metadata;
86 		return 0;
87 	} else {
88 		return -ENOMEM;
89 	}
90 }
91 
92 int
93 spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
94 			    struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
95 			    spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
96 {
97 	if (g_succeed) {
98 		int i = g_req_records.count;
99 
100 		g_req_records.offset_blocks[i] = offset_blocks;
101 		g_req_records.num_blocks[i] = num_blocks;
102 		g_req_records.io_type[i] = CONCAT_WRITEV;
103 		g_req_records.count++;
104 		cb(NULL, true, cb_arg);
105 		g_req_records.md = opts->metadata;
106 		return 0;
107 	} else {
108 		return -ENOMEM;
109 	}
110 }
111 
112 int
113 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
114 		       uint64_t offset_blocks, uint64_t num_blocks,
115 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
116 {
117 	if (g_succeed) {
118 		int i = g_req_records.count;
119 
120 		g_req_records.offset_blocks[i] = offset_blocks;
121 		g_req_records.num_blocks[i] = num_blocks;
122 		g_req_records.io_type[i] = CONCAT_UNMAP;
123 		g_req_records.count++;
124 		cb(NULL, true, cb_arg);
125 		return 0;
126 	} else {
127 		return -ENOMEM;
128 	}
129 }
130 
131 int
132 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
133 		       uint64_t offset_blocks, uint64_t num_blocks,
134 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
135 {
136 	if (g_succeed) {
137 		int i = g_req_records.count;
138 
139 		g_req_records.offset_blocks[i] = offset_blocks;
140 		g_req_records.num_blocks[i] = num_blocks;
141 		g_req_records.io_type[i] = CONCAT_FLUSH;
142 		g_req_records.count++;
143 		cb(NULL, true, cb_arg);
144 		return 0;
145 	} else {
146 		return -ENOMEM;
147 	}
148 }
149 
/*
 * Instead of actually queueing for buffer availability, immediately flip
 * g_succeed to true and re-invoke the submit callback, so the previously
 * ENOMEM-failed IO succeeds on its retry.
 */
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	g_succeed = true;
	cb_fn(raid_io);
}
157 
158 static void
159 init_globals(void)
160 {
161 	int i;
162 
163 	for (i = 0; i < MAX_RECORDS; i++) {
164 		g_req_records.offset_blocks[i] = 0;
165 		g_req_records.num_blocks[i] = 0;
166 		g_req_records.io_type[i] = CONCAT_NONE;
167 	}
168 	g_req_records.count = 0;
169 	g_succeed = false;
170 }
171 
172 static int
173 test_setup(void)
174 {
175 	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
176 	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
177 	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
178 	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
179 	uint8_t *num_base_bdevs;
180 	uint64_t *base_bdev_blockcnt;
181 	uint32_t *base_bdev_blocklen;
182 	uint32_t *strip_size_kb;
183 	struct raid_params params;
184 	uint64_t params_count;
185 	int rc;
186 
187 	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
188 		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
189 		       SPDK_COUNTOF(base_bdev_blocklen_values) *
190 		       SPDK_COUNTOF(strip_size_kb_values);
191 	rc = raid_test_params_alloc(params_count);
192 	if (rc) {
193 		return rc;
194 	}
195 
196 	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
197 		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
198 			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
199 				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
200 					params.num_base_bdevs = *num_base_bdevs;
201 					params.base_bdev_blockcnt = *base_bdev_blockcnt;
202 					params.base_bdev_blocklen = *base_bdev_blocklen;
203 					params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
204 					params.md_len = 0;
205 					if (params.strip_size == 0 ||
206 					    params.strip_size > *base_bdev_blockcnt) {
207 						continue;
208 					}
209 					raid_test_params_add(&params);
210 				}
211 			}
212 		}
213 	}
214 
215 	return 0;
216 }
217 
/* Suite teardown: release the raid_params list built by test_setup(). */
static int
test_cleanup(void)
{
	raid_test_params_free();
	return 0;
}
224 
/* Create a raid bdev for the given params and start the concat module on it. */
static struct raid_bdev *
create_concat(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_concat_module);

	CU_ASSERT(concat_start(raid_bdev) == 0);
	return raid_bdev;
}
233 
/* Stop the concat module and tear down the raid bdev created by create_concat(). */
static void
delete_concat(struct raid_bdev *raid_bdev)
{
	concat_stop(raid_bdev);
	raid_test_delete_raid_bdev(raid_bdev);
}
240 
241 static void
242 test_concat_start(void)
243 {
244 	struct raid_bdev *raid_bdev;
245 	struct raid_params *params;
246 	struct concat_block_range *block_range;
247 	uint64_t total_blockcnt;
248 	int i;
249 
250 	RAID_PARAMS_FOR_EACH(params) {
251 		raid_bdev = create_concat(params);
252 		block_range = raid_bdev->module_private;
253 		total_blockcnt = 0;
254 		for (i = 0; i < params->num_base_bdevs; i++) {
255 			CU_ASSERT(block_range[i].start == total_blockcnt);
256 			CU_ASSERT(block_range[i].length == params->base_bdev_blockcnt);
257 			total_blockcnt += params->base_bdev_blockcnt;
258 		}
259 		delete_concat(raid_bdev);
260 	}
261 }
262 
263 static void
264 bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
265 {
266 	if (bdev_io->u.bdev.iovs) {
267 		if (bdev_io->u.bdev.iovs->iov_base) {
268 			free(bdev_io->u.bdev.iovs->iov_base);
269 		}
270 		free(bdev_io->u.bdev.iovs);
271 	}
272 
273 	if (bdev_io->u.bdev.ext_opts) {
274 		if (bdev_io->u.bdev.ext_opts->metadata) {
275 			bdev_io->u.bdev.ext_opts->metadata = NULL;
276 		}
277 		free(bdev_io->u.bdev.ext_opts);
278 	}
279 	free(bdev_io);
280 }
281 
282 static void
283 bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
284 		   uint64_t lba, uint64_t blocks, int16_t iotype)
285 {
286 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
287 
288 	bdev_io->bdev = bdev;
289 	bdev_io->u.bdev.offset_blocks = lba;
290 	bdev_io->u.bdev.num_blocks = blocks;
291 	bdev_io->type = iotype;
292 
293 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
294 		return;
295 	}
296 
297 	bdev_io->u.bdev.iovcnt = 1;
298 	bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
299 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
300 	bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * 4096);
301 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
302 	bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_LEN;
303 	bdev_io->internal.ch = channel;
304 	bdev_io->u.bdev.ext_opts = calloc(1, sizeof(struct spdk_bdev_ext_io_opts));
305 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.ext_opts != NULL);
306 	bdev_io->u.bdev.ext_opts->metadata = (void *)0xAEDFEBAC;
307 }
308 
/*
 * Submit a one-block IO targeting the first LBA of each base bdev in turn and
 * verify that exactly one request with the expected offset, length, type and
 * metadata pointer reaches the stubbed base bdev layer.
 */
static void
submit_and_verify_rw(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;
	int i;

	lba = 0;
	blocks = 1;
	for (i = 0; i < params->num_base_bdevs; i++) {
		init_globals();
		raid_bdev = create_concat(params);
		/* The raid_bdev_io lives in the bdev_io's driver context area. */
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
		raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
		raid_ch->base_channel = calloc(params->num_base_bdevs,
					       sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
		raid_io->raid_ch = raid_ch;
		raid_io->raid_bdev = raid_bdev;
		ch = calloc(1, sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(ch != NULL);

		switch (io_type) {
		case CONCAT_WRITEV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_READV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_UNMAP:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
			concat_submit_null_payload_request(raid_io);
			break;
		case CONCAT_FLUSH:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
			concat_submit_null_payload_request(raid_io);
			break;
		default:
			CU_ASSERT(false);
		}

		/*
		 * We submit request to the first lba of each underlying device,
		 * so the offset of the underlying device should always be 0.
		 */
		CU_ASSERT(g_req_records.offset_blocks[0] == 0);
		CU_ASSERT(g_req_records.num_blocks[0] == blocks);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.count == 1);
		/* Metadata sentinel from bdev_io_initialize() must be passed through. */
		CU_ASSERT(g_req_records.md == (void *)0xAEDFEBAC);
		bdev_io_cleanup(bdev_io);
		free(ch);
		free(raid_ch->base_channel);
		free(raid_ch);
		delete_concat(raid_bdev);
		/* Advance to the first LBA of the next base bdev. */
		lba += params->base_bdev_blockcnt;
	}
}
376 
377 static void
378 test_concat_rw(void)
379 {
380 	struct raid_params *params;
381 	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_WRITEV, CONCAT_READV};
382 	enum CONCAT_IO_TYPE io_type;
383 	int i;
384 
385 	RAID_PARAMS_FOR_EACH(params) {
386 		for (i = 0; i < 2; i ++) {
387 			io_type = io_type_list[i];
388 			submit_and_verify_rw(io_type, params);
389 		}
390 	}
391 }
392 
/*
 * Submit a null-payload (flush/unmap) request that starts inside the second
 * base bdev and, when possible, spans into the third, then verify how the
 * concat module split it across base bdevs.
 */
static void
submit_and_verify_null_payload(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;

	/*
	 * In this unittest, all base bdevs have the same blockcnt.
	 * If the base_bdev_blockcnt > 1, the request will start from
	 * the second bdev, and across two bdevs.
	 * If the base_bdev_blockcnt == 1, the request will start from
	 * the third bdev. In this case, if there are only 3 bdevs,
	 * we can not set blocks to base_bdev_blockcnt + 1 because the request
	 * will be beyond the end of the last bdev, so we set the blocks to 1
	 */
	lba = params->base_bdev_blockcnt + 1;
	if (params->base_bdev_blockcnt == 1 && params->num_base_bdevs == 3) {
		blocks = 1;
	} else {
		blocks = params->base_bdev_blockcnt + 1;
	}
	init_globals();
	raid_bdev = create_concat(params);
	/* The raid_bdev_io lives in the bdev_io's driver context area. */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
	raid_ch->base_channel = calloc(params->num_base_bdevs,
				       sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
	raid_io->raid_ch = raid_ch;
	raid_io->raid_bdev = raid_bdev;
	ch = calloc(1, sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	switch (io_type) {
	case CONCAT_UNMAP:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
		concat_submit_null_payload_request(raid_io);
		break;
	case CONCAT_FLUSH:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
		concat_submit_null_payload_request(raid_io);
		break;
	default:
		CU_ASSERT(false);
	}

	if (params->base_bdev_blockcnt == 1) {
		if (params->num_base_bdevs == 3) {
			/* One-block request entirely inside the third bdev. */
			CU_ASSERT(g_req_records.count == 1);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
		} else {
			/* Two one-block requests to the third and fourth bdevs. */
			CU_ASSERT(g_req_records.count == 2);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
			CU_ASSERT(g_req_records.io_type[0] == io_type);
			CU_ASSERT(g_req_records.offset_blocks[1] == 0);
			CU_ASSERT(g_req_records.num_blocks[1] == 1);
			CU_ASSERT(g_req_records.io_type[1] == io_type);
		}
	} else {
		/* Request split across the second and third bdevs:
		 * tail of the second bdev plus two blocks of the third. */
		CU_ASSERT(g_req_records.count == 2);
		CU_ASSERT(g_req_records.offset_blocks[0] == 1);
		CU_ASSERT(g_req_records.num_blocks[0] == params->base_bdev_blockcnt - 1);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.offset_blocks[1] == 0);
		CU_ASSERT(g_req_records.num_blocks[1] == 2);
		CU_ASSERT(g_req_records.io_type[1] == io_type);
	}
	bdev_io_cleanup(bdev_io);
	free(ch);
	free(raid_ch->base_channel);
	free(raid_ch);
	delete_concat(raid_bdev);
}
475 
476 static void
477 test_concat_null_payload(void)
478 {
479 	struct raid_params *params;
480 	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_FLUSH, CONCAT_UNMAP};
481 	enum CONCAT_IO_TYPE io_type;
482 	int i;
483 
484 	RAID_PARAMS_FOR_EACH(params) {
485 		for (i = 0; i < 2; i ++) {
486 			io_type = io_type_list[i];
487 			submit_and_verify_null_payload(io_type, params);
488 		}
489 	}
490 }
491 
/* Register the concat suite and run it; returns the CUnit failure count. */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	/* CUEA_ABORT makes CUnit framework errors terminate the run. */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("concat", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_concat_start);
	CU_ADD_TEST(suite, test_concat_rw);
	CU_ADD_TEST(suite, test_concat_null_payload);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
512