xref: /spdk/test/unit/lib/bdev/raid/concat.c/concat_ut.c (revision 45a053c5777494f4e8ce4bc1191c9de3920377f7)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_internal/cunit.h"
8 #include "spdk/env.h"
9 
10 #include "common/lib/ut_multithread.c"
11 
12 #include "bdev/raid/concat.c"
13 #include "../common.c"
14 
/*
 * The concat module in this test submits IO through the _ext variants
 * (implemented below to record requests), so the separate-metadata-buffer
 * read/write entry points only need to satisfy the linker; they always
 * report success.
 */
DEFINE_STUB(spdk_bdev_readv_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, void *md,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, void *md,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
25 
/* Block size in bytes used when sizing IO payload buffers in this test. */
#define BLOCK_LEN (4096)

/* Kind of request observed by the base bdev stubs below. */
enum CONCAT_IO_TYPE {
	CONCAT_NONE = 0,
	CONCAT_WRITEV,
	CONCAT_READV,
	CONCAT_FLUSH,
	CONCAT_UNMAP,
};
35 
/* Maximum number of base bdev requests recorded for one concat request. */
#define MAX_RECORDS (10)
/*
 * Store the information of io requests sent to the underlying bdevs.
 * For a single null payload request to the concat bdev,
 * we may send multiple requests to the underlying bdevs,
 * so we store the io request information to arrays.
 */
struct req_records {
	uint64_t offset_blocks[MAX_RECORDS];		/* start offset on the base bdev, per request */
	uint64_t num_blocks[MAX_RECORDS];		/* length in blocks, per request */
	enum CONCAT_IO_TYPE io_type[MAX_RECORDS];	/* operation type, per request */
	int count;					/* number of valid entries above */
	void *md;					/* metadata pointer seen by the last read/write */
} g_req_records;
50 
/*
 * When g_succeed is true, the spdk_bdev_readv/writev/unmap/flush_blocks
 * stubs return 0; when it is false they return -ENOMEM.
 * Each test sets it to false before submitting an IO so the first submission
 * fails, the concat module then calls raid_bdev_queue_io_wait(), and that
 * stub flips g_succeed to true before retrying, so the retried IO succeeds.
 */
bool g_succeed;

/* Module registration and bdev_io freeing are irrelevant to these tests. */
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
64 
65 int
66 spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
67 			   struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
68 			   spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
69 {
70 	if (g_succeed) {
71 		int i = g_req_records.count;
72 
73 		g_req_records.offset_blocks[i] = offset_blocks;
74 		g_req_records.num_blocks[i] = num_blocks;
75 		g_req_records.io_type[i] = CONCAT_READV;
76 		g_req_records.count++;
77 		cb(NULL, true, cb_arg);
78 		g_req_records.md = opts->metadata;
79 		return 0;
80 	} else {
81 		return -ENOMEM;
82 	}
83 }
84 
85 int
86 spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
87 			    struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
88 			    spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
89 {
90 	if (g_succeed) {
91 		int i = g_req_records.count;
92 
93 		g_req_records.offset_blocks[i] = offset_blocks;
94 		g_req_records.num_blocks[i] = num_blocks;
95 		g_req_records.io_type[i] = CONCAT_WRITEV;
96 		g_req_records.count++;
97 		cb(NULL, true, cb_arg);
98 		g_req_records.md = opts->metadata;
99 		return 0;
100 	} else {
101 		return -ENOMEM;
102 	}
103 }
104 
105 int
106 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
107 		       uint64_t offset_blocks, uint64_t num_blocks,
108 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
109 {
110 	if (g_succeed) {
111 		int i = g_req_records.count;
112 
113 		g_req_records.offset_blocks[i] = offset_blocks;
114 		g_req_records.num_blocks[i] = num_blocks;
115 		g_req_records.io_type[i] = CONCAT_UNMAP;
116 		g_req_records.count++;
117 		cb(NULL, true, cb_arg);
118 		return 0;
119 	} else {
120 		return -ENOMEM;
121 	}
122 }
123 
124 int
125 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
126 		       uint64_t offset_blocks, uint64_t num_blocks,
127 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
128 {
129 	if (g_succeed) {
130 		int i = g_req_records.count;
131 
132 		g_req_records.offset_blocks[i] = offset_blocks;
133 		g_req_records.num_blocks[i] = num_blocks;
134 		g_req_records.io_type[i] = CONCAT_FLUSH;
135 		g_req_records.count++;
136 		cb(NULL, true, cb_arg);
137 		return 0;
138 	} else {
139 		return -ENOMEM;
140 	}
141 }
142 
/*
 * Stub for raid_bdev_queue_io_wait(): instead of waiting for resources,
 * flip g_succeed to true and invoke the retry callback immediately, so the
 * re-submitted IO succeeds on its second attempt.
 */
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	g_succeed = true;
	cb_fn(raid_io);
}
150 
/* Completion hook for the raid IO under test: every IO in this suite is
 * expected to finish successfully (after at most one ENOMEM retry).
 */
static void
raid_test_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
}
156 
157 static void
158 init_globals(void)
159 {
160 	int i;
161 
162 	for (i = 0; i < MAX_RECORDS; i++) {
163 		g_req_records.offset_blocks[i] = 0;
164 		g_req_records.num_blocks[i] = 0;
165 		g_req_records.io_type[i] = CONCAT_NONE;
166 	}
167 	g_req_records.count = 0;
168 	g_succeed = false;
169 }
170 
171 static int
172 test_setup(void)
173 {
174 	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
175 	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
176 	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
177 	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
178 	uint8_t *num_base_bdevs;
179 	uint64_t *base_bdev_blockcnt;
180 	uint32_t *base_bdev_blocklen;
181 	uint32_t *strip_size_kb;
182 	struct raid_params params;
183 	uint64_t params_count;
184 	int rc;
185 
186 	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
187 		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
188 		       SPDK_COUNTOF(base_bdev_blocklen_values) *
189 		       SPDK_COUNTOF(strip_size_kb_values);
190 	rc = raid_test_params_alloc(params_count);
191 	if (rc) {
192 		return rc;
193 	}
194 
195 	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
196 		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
197 			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
198 				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
199 					params.num_base_bdevs = *num_base_bdevs;
200 					params.base_bdev_blockcnt = *base_bdev_blockcnt;
201 					params.base_bdev_blocklen = *base_bdev_blocklen;
202 					params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
203 					params.md_len = 0;
204 					if (params.strip_size == 0 ||
205 					    params.strip_size > *base_bdev_blockcnt) {
206 						continue;
207 					}
208 					raid_test_params_add(&params);
209 				}
210 			}
211 		}
212 	}
213 
214 	return 0;
215 }
216 
/* Suite teardown: release the params matrix allocated in test_setup(). */
static int
test_cleanup(void)
{
	raid_test_params_free();
	return 0;
}
223 
/* Create a concat raid bdev for the given params and start the module. */
static struct raid_bdev *
create_concat(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_concat_module);

	CU_ASSERT(concat_start(raid_bdev) == 0);
	return raid_bdev;
}
232 
/* Stop the concat module and delete the raid bdev created by create_concat(). */
static void
delete_concat(struct raid_bdev *raid_bdev)
{
	concat_stop(raid_bdev);
	raid_test_delete_raid_bdev(raid_bdev);
}
239 
240 static void
241 test_concat_start(void)
242 {
243 	struct raid_bdev *raid_bdev;
244 	struct raid_params *params;
245 	struct concat_block_range *block_range;
246 	uint64_t total_blockcnt;
247 	int i;
248 
249 	RAID_PARAMS_FOR_EACH(params) {
250 		raid_bdev = create_concat(params);
251 		block_range = raid_bdev->module_private;
252 		total_blockcnt = 0;
253 		for (i = 0; i < params->num_base_bdevs; i++) {
254 			CU_ASSERT(block_range[i].start == total_blockcnt);
255 			CU_ASSERT(block_range[i].length == params->base_bdev_blockcnt);
256 			total_blockcnt += params->base_bdev_blockcnt;
257 		}
258 		delete_concat(raid_bdev);
259 	}
260 }
261 
262 static void
263 raid_io_cleanup(struct raid_bdev_io *raid_io)
264 {
265 	if (raid_io->iovs) {
266 		free(raid_io->iovs->iov_base);
267 		free(raid_io->iovs);
268 	}
269 
270 	free(raid_io);
271 }
272 
273 static void
274 raid_io_initialize(struct raid_bdev_io *raid_io, struct raid_bdev_io_channel *raid_ch,
275 		   struct raid_bdev *raid_bdev, uint64_t lba, uint64_t blocks, int16_t iotype)
276 {
277 	struct iovec *iovs;
278 	int iovcnt;
279 	void *md_buf;
280 
281 	if (iotype == SPDK_BDEV_IO_TYPE_UNMAP || iotype == SPDK_BDEV_IO_TYPE_FLUSH) {
282 		iovs = NULL;
283 		iovcnt = 0;
284 		md_buf = NULL;
285 	} else {
286 		iovcnt = 1;
287 		iovs = calloc(iovcnt, sizeof(struct iovec));
288 		SPDK_CU_ASSERT_FATAL(iovs != NULL);
289 		iovs->iov_len = raid_io->num_blocks * BLOCK_LEN;
290 		iovs->iov_base = calloc(1, iovs->iov_len);
291 		SPDK_CU_ASSERT_FATAL(iovs->iov_base != NULL);
292 		md_buf = (void *)0xAEDFEBAC;
293 	}
294 
295 	raid_test_bdev_io_init(raid_io, raid_bdev, raid_ch, iotype, lba, blocks, iovs, iovcnt, md_buf);
296 }
297 
/*
 * Submit a single-block read or write to the first LBA of each base bdev in
 * turn, and verify that exactly one request with offset 0 reaches the base
 * bdev layer with the expected type and metadata pointer. init_globals()
 * sets g_succeed to false, so each submission also exercises the ENOMEM /
 * queue-io-wait retry path.
 */
static void
submit_and_verify_rw(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
	struct raid_bdev *raid_bdev;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;
	int i;

	lba = 0;
	blocks = 1;
	for (i = 0; i < params->num_base_bdevs; i++) {
		init_globals();
		raid_bdev = create_concat(params);
		raid_io = calloc(1, sizeof(*raid_io));
		SPDK_CU_ASSERT_FATAL(raid_io != NULL);
		raid_ch = raid_test_create_io_channel(raid_bdev);

		switch (io_type) {
		case CONCAT_WRITEV:
			raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_READV:
			raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_UNMAP:
			raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
			concat_submit_null_payload_request(raid_io);
			break;
		case CONCAT_FLUSH:
			raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
			concat_submit_null_payload_request(raid_io);
			break;
		default:
			CU_ASSERT(false);
		}

		/*
		 * We submit request to the first lba of each underlying device,
		 * so the offset of the underlying device should always be 0.
		 */
		CU_ASSERT(g_req_records.offset_blocks[0] == 0);
		CU_ASSERT(g_req_records.num_blocks[0] == blocks);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.count == 1);
		/* The sentinel md pointer set by raid_io_initialize() must reach
		 * the base bdev stub unchanged.
		 */
		CU_ASSERT(g_req_records.md == (void *)0xAEDFEBAC);
		raid_io_cleanup(raid_io);
		raid_test_destroy_io_channel(raid_ch);
		delete_concat(raid_bdev);
		/* Advance to the first LBA of the next base bdev. */
		lba += params->base_bdev_blockcnt;
	}
}
352 
353 static void
354 test_concat_rw(void)
355 {
356 	struct raid_params *params;
357 	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_WRITEV, CONCAT_READV};
358 	enum CONCAT_IO_TYPE io_type;
359 	int i;
360 
361 	RAID_PARAMS_FOR_EACH(params) {
362 		for (i = 0; i < 2; i ++) {
363 			io_type = io_type_list[i];
364 			submit_and_verify_rw(io_type, params);
365 		}
366 	}
367 }
368 
/*
 * Submit a null-payload (unmap/flush) request that normally starts inside the
 * second base bdev and crosses into the third, then verify how it was split
 * across the underlying bdevs.
 */
static void
submit_and_verify_null_payload(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
	struct raid_bdev *raid_bdev;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;

	/*
	 * In this unittest, all base bdevs have the same blockcnt.
	 * If the base_bdev_blockcnt > 1, the request will start from
	 * the second bdev, and across two bdevs.
	 * If the base_bdev_blockcnt == 1, the request will start from
	 * the third bdev. In this case, if there are only 3 bdevs,
	 * we can not set blocks to base_bdev_blockcnt + 1 because the request
	 * will be beyond the end of the last bdev, so we set the blocks to 1
	 */
	lba = params->base_bdev_blockcnt + 1;
	if (params->base_bdev_blockcnt == 1 && params->num_base_bdevs == 3) {
		blocks = 1;
	} else {
		blocks = params->base_bdev_blockcnt + 1;
	}
	init_globals();
	raid_bdev = create_concat(params);
	raid_io = calloc(1, sizeof(*raid_io));
	SPDK_CU_ASSERT_FATAL(raid_io != NULL);
	raid_ch = raid_test_create_io_channel(raid_bdev);

	switch (io_type) {
	case CONCAT_UNMAP:
		raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
		concat_submit_null_payload_request(raid_io);
		break;
	case CONCAT_FLUSH:
		raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
		concat_submit_null_payload_request(raid_io);
		break;
	default:
		CU_ASSERT(false);
	}

	if (params->base_bdev_blockcnt == 1) {
		if (params->num_base_bdevs == 3) {
			/* One block starting at the third (last) bdev: a single
			 * sub-request at offset 0 of that bdev.
			 */
			CU_ASSERT(g_req_records.count == 1);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
		} else {
			/* Two blocks starting at the third bdev: one block on the
			 * third bdev and one on the fourth, each at offset 0.
			 */
			CU_ASSERT(g_req_records.count == 2);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
			CU_ASSERT(g_req_records.io_type[0] == io_type);
			CU_ASSERT(g_req_records.offset_blocks[1] == 0);
			CU_ASSERT(g_req_records.num_blocks[1] == 1);
			CU_ASSERT(g_req_records.io_type[1] == io_type);
		}
	} else {
		/* Request starts at block 1 of the second bdev, covers the rest
		 * of that bdev and the first two blocks of the third.
		 */
		CU_ASSERT(g_req_records.count == 2);
		CU_ASSERT(g_req_records.offset_blocks[0] == 1);
		CU_ASSERT(g_req_records.num_blocks[0] == params->base_bdev_blockcnt - 1);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.offset_blocks[1] == 0);
		CU_ASSERT(g_req_records.num_blocks[1] == 2);
		CU_ASSERT(g_req_records.io_type[1] == io_type);
	}
	raid_io_cleanup(raid_io);
	raid_test_destroy_io_channel(raid_ch);
	delete_concat(raid_bdev);
}
438 
439 static void
440 test_concat_null_payload(void)
441 {
442 	struct raid_params *params;
443 	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_FLUSH, CONCAT_UNMAP};
444 	enum CONCAT_IO_TYPE io_type;
445 	int i;
446 
447 	RAID_PARAMS_FOR_EACH(params) {
448 		for (i = 0; i < 2; i ++) {
449 			io_type = io_type_list[i];
450 			submit_and_verify_null_payload(io_type, params);
451 		}
452 	}
453 }
454 
/*
 * Unit test entry point: register the concat suite with its setup/teardown
 * hooks, run the tests on a single SPDK unit-test thread, and return the
 * number of failures as the process exit code.
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("concat", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_concat_start);
	CU_ADD_TEST(suite, test_concat_rw);
	CU_ADD_TEST(suite, test_concat_null_payload);

	/* The raid code under test expects to run on an SPDK thread. */
	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}
478