xref: /spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c (revision 7e846d2bb99838a21b042dd2db1d0e36eb17f95c)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "lib/test_env.c"
37 #include "lib/ut_multithread.c"
38 
39 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
40 #undef SPDK_CONFIG_VTUNE
41 
42 #include "bdev.c"
43 
44 #define BDEV_UT_NUM_THREADS 3
45 
/* Stub out the SCSI/NVMe translation helper referenced by bdev.c. */
DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

/* Test bdev: wraps an spdk_bdev plus an io_target used as the io_device key. */
struct ut_bdev {
	struct spdk_bdev	bdev;
	int			io_target;
};

/* Per-channel state for the stub bdev. */
struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;	/* I/O queued by stub_submit_request */
	uint32_t			outstanding_cnt;	/* number of entries on outstanding_io */
	uint32_t			avail_cnt;		/* remaining capacity before submits fail */
};

/* Single global test bdev and the descriptor opened on it by setup_test(). */
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
62 
63 static int
64 stub_create_ch(void *io_device, void *ctx_buf)
65 {
66 	struct ut_bdev_channel *ch = ctx_buf;
67 
68 	TAILQ_INIT(&ch->outstanding_io);
69 	ch->outstanding_cnt = 0;
70 	/*
71 	 * When avail gets to 0, the submit_request function will return ENOMEM.
72 	 *  Most tests to not want ENOMEM to occur, so by default set this to a
73 	 *  big value that won't get hit.  The ENOMEM tests can then override this
74 	 *  value to something much smaller to induce ENOMEM conditions.
75 	 */
76 	ch->avail_cnt = 2048;
77 	return 0;
78 }
79 
/* I/O channel destructor for the stub io_device; no per-channel resources to free. */
static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}
84 
/* fn_table get_io_channel callback: return a channel for the global stub io_device. */
static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev.io_target);
}
90 
/* fn_table destruct callback; nothing to tear down for the stub bdev. */
static int
stub_destruct(void *ctx)
{
	return 0;
}
96 
/*
 * fn_table submit_request callback: queue the I/O on the channel while
 *  capacity remains; queued I/O stay outstanding until a test drains them
 *  with stub_complete_io().
 */
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		/*
		 * NOTE(review): the comment in stub_create_ch says exhaustion
		 *  produces ENOMEM, but this path completes the I/O with FAILED
		 *  status — confirm which semantic the ENOMEM tests depend on.
		 */
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}
110 
111 static uint32_t
112 stub_complete_io(uint32_t num_to_complete)
113 {
114 	struct spdk_io_channel *_ch = spdk_get_io_channel(&g_bdev.io_target);
115 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
116 	struct spdk_bdev_io *io;
117 	bool complete_all = (num_to_complete == 0);
118 	uint32_t num_completed = 0;
119 
120 	while (complete_all || num_completed < num_to_complete) {
121 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
122 			break;
123 		}
124 		io = TAILQ_FIRST(&ch->outstanding_io);
125 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
126 		ch->outstanding_cnt--;
127 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
128 		ch->avail_cnt++;
129 		num_completed++;
130 	}
131 
132 	spdk_put_io_channel(_ch);
133 	return num_completed;
134 }
135 
/* Function table for the stub bdev; only the callbacks these tests exercise. */
static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};
141 
/* Bdev module init callback; the stub module needs no setup. */
static int
module_init(void)
{
	return 0;
}

/* Bdev module fini callback; the stub module needs no teardown. */
static void
module_fini(void)
{
}

SPDK_BDEV_MODULE_REGISTER(bdev_ut, module_init, module_fini, NULL, NULL, NULL)
154 
155 static void
156 register_bdev(void)
157 {
158 	g_bdev.bdev.name = "bdev_ut";
159 	g_bdev.bdev.fn_table = &fn_table;
160 	g_bdev.bdev.module = SPDK_GET_BDEV_MODULE(bdev_ut);
161 	g_bdev.bdev.blocklen = 4096;
162 	g_bdev.bdev.blockcnt = 1024;
163 
164 	spdk_io_device_register(&g_bdev.io_target, stub_create_ch, stub_destroy_ch,
165 				sizeof(struct ut_bdev_channel));
166 	spdk_bdev_register(&g_bdev.bdev);
167 }
168 
/*
 * Unregister the stub bdev and its io_device, then clear the global so the
 *  next setup_test() starts from a clean state.
 */
static void
unregister_bdev(void)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&g_bdev.bdev);
	spdk_io_device_unregister(&g_bdev.io_target, NULL);
	memset(&g_bdev, 0, sizeof(g_bdev));
}
178 
/* spdk_bdev_initialize() completion callback: record success in *done. */
static void
bdev_init_cb(void *done, int rc)
{
	bool *init_done = done;

	CU_ASSERT(rc == 0);
	*init_done = true;
}
185 
/*
 * Common test setup: allocate the simulated threads, initialize the bdev
 *  layer, register the stub bdev, and open a descriptor to it in g_desc.
 */
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done, NULL, NULL);
	register_bdev();
	/* NOTE(review): spdk_bdev_open()'s return value is ignored — on failure
	 *  g_desc would be unusable; consider asserting on it. */
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}
196 
/*
 * Common test teardown: close the descriptor, unregister the bdev, shut down
 *  the bdev layer, and free the simulated threads.
 */
static void
teardown_test(void)
{
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev();
	spdk_bdev_finish();
	free_threads();
}
206 
207 static void
208 basic(void)
209 {
210 	setup_test();
211 
212 	set_thread(0);
213 
214 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
215 	spdk_put_io_channel(g_ut_threads[0].ch);
216 
217 	teardown_test();
218 }
219 
/* Reset completion callback: assert success and flag completion via cb_arg. */
static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *reset_completed = cb_arg;

	CU_ASSERT(success == true);
	*reset_completed = true;
	spdk_bdev_free_io(bdev_io);
}
229 
/*
 * Verify that releasing an I/O channel while a reset submitted on it is
 *  still deferred does not crash or leak — the reset must still complete.
 */
static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	/* Drain the reset I/O still outstanding in the stub. */
	stub_complete_io(0);

	teardown_test();
}
254 
255 static void
256 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
257 {
258 	enum spdk_bdev_io_status *status = cb_arg;
259 
260 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
261 	spdk_bdev_free_io(bdev_io);
262 }
263 
/*
 * Verify that destroying a channel aborts a reset queued on it (with FAILED
 *  status) without disturbing the reset already in progress on another
 *  channel, which must still complete successfully.
 */
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}
318 
319 static void
320 io_during_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
321 {
322 	enum spdk_bdev_io_status *status = cb_arg;
323 
324 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
325 	spdk_bdev_free_io(bdev_io);
326 }
327 
/*
 * Verify that I/O submitted while a reset is in progress is failed by the
 *  bdev layer, while I/O submitted with no reset pending completes normally.
 */
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_reset_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	set_thread(0);
	stub_complete_io(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	teardown_test();
}
415 
416 int
417 main(int argc, char **argv)
418 {
419 	CU_pSuite	suite = NULL;
420 	unsigned int	num_failures;
421 
422 	if (CU_initialize_registry() != CUE_SUCCESS) {
423 		return CU_get_error();
424 	}
425 
426 	suite = CU_add_suite("bdev", NULL, NULL);
427 	if (suite == NULL) {
428 		CU_cleanup_registry();
429 		return CU_get_error();
430 	}
431 
432 	if (
433 		CU_add_test(suite, "basic", basic) == NULL ||
434 		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
435 		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
436 		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL
437 	) {
438 		CU_cleanup_registry();
439 		return CU_get_error();
440 	}
441 
442 	CU_basic_set_mode(CU_BRM_VERBOSE);
443 	CU_basic_run_tests();
444 	num_failures = CU_get_number_of_failures();
445 	CU_cleanup_registry();
446 	return num_failures;
447 }
448