/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/env.h"
#include "thread/thread_internal.h"
#include "spdk_internal/mock.h"

#include "bdev/raid/bdev_raid.h"
#include "bdev/raid/concat.c"
#include "../common.c"

#define BLOCK_LEN (4096)

enum CONCAT_IO_TYPE {
	CONCAT_NONE = 0,
	CONCAT_WRITEV,
	CONCAT_READV,
	CONCAT_FLUSH,
	CONCAT_UNMAP,
};

#define MAX_RECORDS (10)
/*
 * Store information about the I/O requests sent to the underlying bdevs.
 * A single null-payload request to the concat bdev may fan out into
 * multiple requests to the underlying bdevs, so the request information
 * is recorded in arrays.
 */
struct req_records {
	uint64_t offset_blocks[MAX_RECORDS];
	uint64_t num_blocks[MAX_RECORDS];
	enum CONCAT_IO_TYPE io_type[MAX_RECORDS];
	int count;
	void *md;
} g_req_records;

/*
 * When g_succeed is true, the spdk_bdev_readv/writev/unmap/flush_blocks
 * stubs below return 0; when it is false, they return -ENOMEM.
 * We always set it to false before an IO request so that the first
 * submission fails and raid_bdev_queue_io_wait re-submits the request.
 * raid_bdev_queue_io_wait sets g_succeed to true, so the IO succeeds
 * on the second attempt.
 */
bool g_succeed;

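/* Stub out the raid framework hooks that concat.c calls into. */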
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
				      enum spdk_bdev_io_status status));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(raid_bdev_io_complete_part, bool,
	    (struct raid_bdev_io *raid_io, uint64_t completed,
	     enum spdk_bdev_io_status status),
	    true);

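/*
 * Stubs for the spdk_bdev_*_blocks() submission APIs: on success they
 * record the request in g_req_records and complete it immediately,
 * otherwise they return -ENOMEM to exercise the queue_io_wait retry path.
 */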
int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
{
	if (g_succeed) {
		int i = g_req_records.count;

		g_req_records.offset_blocks[i] = offset_blocks;
		g_req_records.num_blocks[i] = num_blocks;
		g_req_records.io_type[i] = CONCAT_READV;
		g_req_records.md = opts->metadata;
		g_req_records.count++;
		cb(NULL, true, cb_arg);
		return 0;
	} else {
		return -ENOMEM;
	}
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
{
	if (g_succeed) {
		int i = g_req_records.count;

		g_req_records.offset_blocks[i] = offset_blocks;
		g_req_records.num_blocks[i] = num_blocks;
		g_req_records.io_type[i] = CONCAT_WRITEV;
		g_req_records.md = opts->metadata;
		g_req_records.count++;
		cb(NULL, true, cb_arg);
		return 0;
	} else {
		return -ENOMEM;
	}
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_succeed) {
		int i = g_req_records.count;

		g_req_records.offset_blocks[i] = offset_blocks;
		g_req_records.num_blocks[i] = num_blocks;
		g_req_records.io_type[i] = CONCAT_UNMAP;
		g_req_records.count++;
		cb(NULL, true, cb_arg);
		return 0;
	} else {
		return -ENOMEM;
	}
}

int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_succeed) {
		int i = g_req_records.count;

		g_req_records.offset_blocks[i] = offset_blocks;
		g_req_records.num_blocks[i] = num_blocks;
		g_req_records.io_type[i] = CONCAT_FLUSH;
		g_req_records.count++;
		cb(NULL, true, cb_arg);
		return 0;
	} else {
		return -ENOMEM;
	}
}

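/*
 * Pretend to queue the IO for retry: flip g_succeed so the re-submitted
 * request succeeds, then invoke the wait callback immediately.
 */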
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	g_succeed = true;
	cb_fn(raid_io);
}

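/* Reset the request records and force the first submission to fail. */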
static void
init_globals(void)
{
	int i;

	for (i = 0; i < MAX_RECORDS; i++) {
		g_req_records.offset_blocks[i] = 0;
		g_req_records.num_blocks[i] = 0;
		g_req_records.io_type[i] = CONCAT_NONE;
	}
	g_req_records.count = 0;
	g_succeed = false;
}

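/*
 * Build the test parameter list as the cross product of base bdev count,
 * block count, block length and strip size, skipping invalid combinations.
 */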
static int
test_setup(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	struct raid_params params;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					params.num_base_bdevs = *num_base_bdevs;
					params.base_bdev_blockcnt = *base_bdev_blockcnt;
					params.base_bdev_blocklen = *base_bdev_blocklen;
					params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
					if (params.strip_size == 0 ||
					    params.strip_size > *base_bdev_blockcnt) {
						continue;
					}
					raid_test_params_add(&params);
				}
			}
		}
	}

	return 0;
}

static int
test_cleanup(void)
{
	raid_test_params_free();
	return 0;
}

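/* Create a concat raid_bdev and run the module's start hook on it. */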
static struct raid_bdev *
create_concat(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_concat_module);

	CU_ASSERT(concat_start(raid_bdev) == 0);
	return raid_bdev;
}

static void
delete_concat(struct raid_bdev *raid_bdev)
{
	concat_stop(raid_bdev);
	raid_test_delete_raid_bdev(raid_bdev);
}

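/*
 * Verify that concat_start() lays the base bdevs out back to back:
 * each block range starts where the previous one ended.
 */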
static void
test_concat_start(void)
{
	struct raid_bdev *raid_bdev;
	struct raid_params *params;
	struct concat_block_range *block_range;
	uint64_t total_blockcnt;
	int i;

	RAID_PARAMS_FOR_EACH(params) {
		raid_bdev = create_concat(params);
		block_range = raid_bdev->module_private;
		total_blockcnt = 0;
		for (i = 0; i < params->num_base_bdevs; i++) {
			CU_ASSERT(block_range[i].start == total_blockcnt);
			CU_ASSERT(block_range[i].length == params->base_bdev_blockcnt);
			total_blockcnt += params->base_bdev_blockcnt;
		}
		delete_concat(raid_bdev);
	}
}

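/* Free everything bdev_io_initialize() allocated, then the bdev_io itself. */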
static void
bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->u.bdev.iovs) {
		if (bdev_io->u.bdev.iovs->iov_base) {
			free(bdev_io->u.bdev.iovs->iov_base);
		}
		free(bdev_io->u.bdev.iovs);
	}

	if (bdev_io->u.bdev.ext_opts) {
		/* metadata is a fake pointer, never allocated, so it is not freed */
		bdev_io->u.bdev.ext_opts->metadata = NULL;
		free(bdev_io->u.bdev.ext_opts);
	}
	free(bdev_io);
}

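/*
 * Fill in a bdev_io for the given IO type. Read and write requests get a
 * single-iovec payload and a fake metadata pointer; unmap and flush carry
 * no payload.
 */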
static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
		return;
	}

	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
	bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * BLOCK_LEN);
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
	bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_LEN;
	bdev_io->internal.ch = channel;
	bdev_io->u.bdev.ext_opts = calloc(1, sizeof(struct spdk_bdev_ext_io_opts));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.ext_opts != NULL);
	bdev_io->u.bdev.ext_opts->metadata = (void *)0xAEDFEBAC;
}

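/*
 * Submit a one-block request to the first LBA of each base bdev in turn
 * and verify that it is forwarded to that bdev at offset 0.
 */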
static void
submit_and_verify_rw(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;
	int i;

	lba = 0;
	blocks = 1;
	for (i = 0; i < params->num_base_bdevs; i++) {
		init_globals();
		raid_bdev = create_concat(params);
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
		raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
		raid_ch->base_channel = calloc(params->num_base_bdevs,
					       sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
		raid_io->raid_ch = raid_ch;
		raid_io->raid_bdev = raid_bdev;
		ch = calloc(1, sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(ch != NULL);

		switch (io_type) {
		case CONCAT_WRITEV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_READV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_UNMAP:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
			concat_submit_null_payload_request(raid_io);
			break;
		case CONCAT_FLUSH:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
			concat_submit_null_payload_request(raid_io);
			break;
		default:
			CU_ASSERT(false);
		}

		/*
		 * We submit the request to the first LBA of each underlying device,
		 * so the offset on the underlying device should always be 0.
		 */
		CU_ASSERT(g_req_records.offset_blocks[0] == 0);
		CU_ASSERT(g_req_records.num_blocks[0] == blocks);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.count == 1);
		CU_ASSERT(g_req_records.md == (void *)0xAEDFEBAC);
		bdev_io_cleanup(bdev_io);
		free(ch);
		free(raid_ch->base_channel);
		free(raid_ch);
		delete_concat(raid_bdev);
		lba += params->base_bdev_blockcnt;
	}
}

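/* Exercise reads and writes across every parameter combination. */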
static void
test_concat_rw(void)
{
	struct raid_params *params;
	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_WRITEV, CONCAT_READV};
	enum CONCAT_IO_TYPE io_type;
	size_t i;

	RAID_PARAMS_FOR_EACH(params) {
		for (i = 0; i < SPDK_COUNTOF(io_type_list); i++) {
			io_type = io_type_list[i];
			submit_and_verify_rw(io_type, params);
		}
	}
}

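/*
 * Submit a null-payload request that starts on the second base bdev and,
 * when the geometry allows, spans into the next one; verify how it is
 * split across the base bdevs.
 */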
static void
submit_and_verify_null_payload(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;

	/*
	 * In this unit test, all base bdevs have the same blockcnt.
	 * If base_bdev_blockcnt > 1, the request starts on the second bdev
	 * and spans two bdevs.
	 * If base_bdev_blockcnt == 1, the request starts on the third bdev.
	 * In that case, if there are only 3 bdevs, we cannot set blocks to
	 * base_bdev_blockcnt + 1 because the request would run past the end
	 * of the last bdev, so we set blocks to 1.
	 */
	lba = params->base_bdev_blockcnt + 1;
	if (params->base_bdev_blockcnt == 1 && params->num_base_bdevs == 3) {
		blocks = 1;
	} else {
		blocks = params->base_bdev_blockcnt + 1;
	}
	init_globals();
	raid_bdev = create_concat(params);
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
	raid_ch->base_channel = calloc(params->num_base_bdevs,
				       sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
	raid_io->raid_ch = raid_ch;
	raid_io->raid_bdev = raid_bdev;
	ch = calloc(1, sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	switch (io_type) {
	case CONCAT_UNMAP:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
		concat_submit_null_payload_request(raid_io);
		break;
	case CONCAT_FLUSH:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
		concat_submit_null_payload_request(raid_io);
		break;
	default:
		CU_ASSERT(false);
	}

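	/*
	 * With lba = blockcnt + 1 the request starts at offset 1 of the
	 * second bdev; blocks = blockcnt + 1 makes it spill exactly 2 blocks
	 * into the third bdev. In the blockcnt == 1 case the request lands
	 * entirely on the third bdev (and the fourth, when one exists).
	 */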
	if (params->base_bdev_blockcnt == 1) {
		if (params->num_base_bdevs == 3) {
			CU_ASSERT(g_req_records.count == 1);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
		} else {
			CU_ASSERT(g_req_records.count == 2);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
			CU_ASSERT(g_req_records.io_type[0] == io_type);
			CU_ASSERT(g_req_records.offset_blocks[1] == 0);
			CU_ASSERT(g_req_records.num_blocks[1] == 1);
			CU_ASSERT(g_req_records.io_type[1] == io_type);
		}
	} else {
		CU_ASSERT(g_req_records.count == 2);
		CU_ASSERT(g_req_records.offset_blocks[0] == 1);
		CU_ASSERT(g_req_records.num_blocks[0] == params->base_bdev_blockcnt - 1);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.offset_blocks[1] == 0);
		CU_ASSERT(g_req_records.num_blocks[1] == 2);
		CU_ASSERT(g_req_records.io_type[1] == io_type);
	}
	bdev_io_cleanup(bdev_io);
	free(ch);
	free(raid_ch->base_channel);
	free(raid_ch);
	delete_concat(raid_bdev);
}

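/* Exercise flush and unmap across every parameter combination. */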
static void
test_concat_null_payload(void)
{
	struct raid_params *params;
	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_FLUSH, CONCAT_UNMAP};
	enum CONCAT_IO_TYPE io_type;
	size_t i;

	RAID_PARAMS_FOR_EACH(params) {
		for (i = 0; i < SPDK_COUNTOF(io_type_list); i++) {
			io_type = io_type_list[i];
			submit_and_verify_null_payload(io_type, params);
		}
	}
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("concat", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_concat_start);
	CU_ADD_TEST(suite, test_concat_rw);
	CU_ADD_TEST(suite, test_concat_null_payload);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}