/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2022 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"

#include "common/lib/ut_multithread.c"

#include "bdev/raid/raid1.c"
#include "../common.c"

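/*
 * Stub out the raid framework callbacks and bdev I/O submission functions that
 * raid1.c calls but which are not exercised directly by these unit tests.
 */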
DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(raid_bdev_module_stop_done, (struct raid_bdev *raid_bdev));
DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
				      enum spdk_bdev_io_status status));
DEFINE_STUB(raid_bdev_io_complete_part, bool, (struct raid_bdev_io *raid_io, uint64_t completed,
		enum spdk_bdev_io_status status), true);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB_V(raid_bdev_queue_io_wait, (struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
					struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn));
DEFINE_STUB(spdk_bdev_readv_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, void *md,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, void *md,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_readv_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_ext, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts), 0);

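/*
 * Populate the raid_params list with every combination of base bdev count, block
 * count and block length that the test cases iterate over via RAID_PARAMS_FOR_EACH().
 */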
static int
test_setup(void)
{
	uint8_t num_base_bdevs_values[] = { 2, 3 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	struct raid_params params;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				params.num_base_bdevs = *num_base_bdevs;
				params.base_bdev_blockcnt = *base_bdev_blockcnt;
				params.base_bdev_blocklen = *base_bdev_blocklen;
				params.strip_size = 0;
				params.md_len = 0;
				raid_test_params_add(&params);
			}
		}
	}

	return 0;
}

static int
test_cleanup(void)
{
	raid_test_params_free();
	return 0;
}

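/* Create and start a raid1 bdev for the given params, returning its raid1_info. */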
static struct raid1_info *
create_raid1(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_raid1_module);

	SPDK_CU_ASSERT_FATAL(raid1_start(raid_bdev) == 0);

	return raid_bdev->module_private;
}

static void
delete_raid1(struct raid1_info *r1_info)
{
	struct raid_bdev *raid_bdev = r1_info->raid_bdev;

	raid1_stop(raid_bdev);

	raid_test_delete_raid_bdev(raid_bdev);
}

static void
test_raid1_start(void)
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid1_info *r1_info;

		r1_info = create_raid1(params);

		SPDK_CU_ASSERT_FATAL(r1_info != NULL);

		CU_ASSERT_EQUAL(r1_info->raid_bdev->level, RAID1);
		CU_ASSERT_EQUAL(r1_info->raid_bdev->bdev.blockcnt, params->base_bdev_blockcnt);
		CU_ASSERT_PTR_EQUAL(r1_info->raid_bdev->module, &g_raid1_module);

		delete_raid1(r1_info);
	}
}

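/*
 * Allocate a raid_bdev_io for a read or write of num_blocks starting at block 0.
 * The raid_bdev_io occupies the driver_ctx area of its parent spdk_bdev_io.
 */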
static struct raid_bdev_io *
get_raid_io(struct raid1_info *r1_info, struct raid_bdev_io_channel *raid_ch,
	    enum spdk_bdev_io_type io_type, uint64_t num_blocks)
{
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev_io *raid_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);

	bdev_io->bdev = &r1_info->raid_bdev->bdev;
	bdev_io->type = io_type;
	bdev_io->u.bdev.offset_blocks = 0;
	bdev_io->u.bdev.num_blocks = num_blocks;
	bdev_io->internal.cb = NULL;
	bdev_io->internal.caller_ctx = NULL;

	raid_io = (void *)bdev_io->driver_ctx;
	raid_io->raid_bdev = r1_info->raid_bdev;
	raid_io->raid_ch = raid_ch;

	return raid_io;
}

static void
put_raid_io(struct raid_bdev_io *raid_io)
{
	free(spdk_bdev_io_from_ctx(raid_io));
}

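/*
 * Invoke test_fn once for every raid_params entry, each time with a freshly created
 * raid1 bdev and a raid_bdev_io_channel populated with dummy per-base-bdev channels
 * and the raid1 module channel, and tear everything down afterwards.
 */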
static void
run_for_each_raid1_config(void (*test_fn)(struct raid_bdev *raid_bdev,
			  struct raid_bdev_io_channel *raid_ch))
{
	struct raid_params *params;

	RAID_PARAMS_FOR_EACH(params) {
		struct raid1_info *r1_info;
		struct raid_bdev_io_channel raid_ch = { 0 };
		int i;

		r1_info = create_raid1(params);

		raid_ch.num_channels = params->num_base_bdevs;
		raid_ch.base_channel = calloc(params->num_base_bdevs, sizeof(struct spdk_io_channel *));
		SPDK_CU_ASSERT_FATAL(raid_ch.base_channel != NULL);
		for (i = 0; i < raid_ch.num_channels; i++) {
			raid_ch.base_channel[i] = calloc(1, sizeof(*raid_ch.base_channel));
			SPDK_CU_ASSERT_FATAL(raid_ch.base_channel[i] != NULL);
		}

		raid_ch.module_channel = raid1_get_io_channel(r1_info->raid_bdev);
		SPDK_CU_ASSERT_FATAL(raid_ch.module_channel);

		test_fn(r1_info->raid_bdev, &raid_ch);

		spdk_put_io_channel(raid_ch.module_channel);
		poll_threads();

		for (i = 0; i < raid_ch.num_channels; i++) {
			free(raid_ch.base_channel[i]);
		}
		free(raid_ch.base_channel);

		delete_raid1(r1_info);
	}
}

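/*
 * Exercise raid1 read load balancing: equally sized reads should rotate across the
 * base bdevs, and a base bdev with more outstanding read blocks should be skipped
 * until the others have caught up.
 */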
static void
_test_raid1_read_balancing(struct raid_bdev *raid_bdev, struct raid_bdev_io_channel *raid_ch)
{
	struct raid1_info *r1_info = raid_bdev->module_private;
	struct raid1_io_channel *raid1_ch = spdk_io_channel_get_ctx(raid_ch->module_channel);
	uint8_t big_io_base_bdev_idx;
	const uint64_t big_io_blocks = 256;
	const uint64_t small_io_blocks = 4;
	uint64_t blocks_remaining;
	struct raid_bdev_io *raid_io;
	uint8_t i;
	int n;

	/* Same-sized IOs should be spread evenly across all base bdevs */
	for (n = 0; n < 3; n++) {
		for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
			raid_io = get_raid_io(r1_info, raid_ch, SPDK_BDEV_IO_TYPE_READ, small_io_blocks);
			raid1_submit_read_request(raid_io);
			CU_ASSERT(raid_io->base_bdev_io_submitted == i);
			put_raid_io(raid_io);
		}
	}

	for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
		CU_ASSERT(raid1_ch->read_blocks_outstanding[i] == n * small_io_blocks);
		raid1_ch->read_blocks_outstanding[i] = 0;
	}

	/*
	 * Submit one big and many small IOs. The small IOs should not land on the same base bdev
	 * as the big one until its outstanding block count has been matched.
	 */
	raid_io = get_raid_io(r1_info, raid_ch, SPDK_BDEV_IO_TYPE_READ, big_io_blocks);
	raid1_submit_read_request(raid_io);
	big_io_base_bdev_idx = raid_io->base_bdev_io_submitted;
	put_raid_io(raid_io);

	blocks_remaining = big_io_blocks * (raid_bdev->num_base_bdevs - 1);
	while (blocks_remaining > 0) {
		raid_io = get_raid_io(r1_info, raid_ch, SPDK_BDEV_IO_TYPE_READ, small_io_blocks);
		raid1_submit_read_request(raid_io);
		CU_ASSERT(raid_io->base_bdev_io_submitted != big_io_base_bdev_idx);
		put_raid_io(raid_io);
		blocks_remaining -= small_io_blocks;
	}

	for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
		CU_ASSERT(raid1_ch->read_blocks_outstanding[i] == big_io_blocks);
	}

	raid_io = get_raid_io(r1_info, raid_ch, SPDK_BDEV_IO_TYPE_READ, small_io_blocks);
	raid1_submit_read_request(raid_io);
	CU_ASSERT(raid_io->base_bdev_io_submitted == big_io_base_bdev_idx);
	put_raid_io(raid_io);
}

static void
test_raid1_read_balancing(void)
{
	run_for_each_raid1_config(_test_raid1_read_balancing);
}

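/* Register the CUnit suite and run it on a single allocated test thread. */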
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("raid1", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_raid1_start);
	CU_ADD_TEST(suite, test_raid1_read_balancing);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}