/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "lib/test_env.c"
#include "lib/ut_multithread.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

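/*
 * Stub bdev module.  It implements just enough of the bdev module interface
 * to let these tests drive the generic bdev layer: each channel keeps its
 * outstanding I/O on a list so that tests can complete them explicitly via
 * stub_complete_io().
 */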
struct ut_bdev {
	struct spdk_bdev	bdev;
	int			io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt gets to 0, the submit_request function will complete
	 *  new I/O with a NOMEM status.  Most tests do not want ENOMEM to occur,
	 *  so by default set this to a big value that won't get hit.  The ENOMEM
	 *  tests can then override this value to something much smaller to
	 *  induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	return spdk_get_io_channel(&g_bdev.io_target);
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

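/*
 * Queue each submitted I/O until a test completes it with stub_complete_io().
 * A reset first fails every I/O already outstanding on the channel, then gets
 * queued itself.  Once avail_cnt is exhausted, new I/O is completed
 * immediately with a NOMEM status so the generic bdev layer's ENOMEM
 * handling can be exercised.
 */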
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

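/*
 * Complete up to num_to_complete queued I/O on the current thread's channel
 * with SUCCESS status, in submission order.  Passing 0 completes everything
 * outstanding.  Returns the number of I/O actually completed.
 */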
static uint32_t
stub_complete_io(uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(&g_bdev.io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

SPDK_BDEV_MODULE_REGISTER(bdev_ut, module_init, module_fini, NULL, NULL, NULL)

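/*
 * Register the stub bdev and its backing io_device with the bdev layer.
 * The io_target address doubles as the io_device handle, so each thread's
 * spdk_get_io_channel() call gets a ut_bdev_channel created by
 * stub_create_ch().
 */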
static void
register_bdev(void)
{
	g_bdev.bdev.name = "bdev_ut";
	g_bdev.bdev.fn_table = &fn_table;
	g_bdev.bdev.module = SPDK_GET_BDEV_MODULE(bdev_ut);
	g_bdev.bdev.blocklen = 4096;
	g_bdev.bdev.blockcnt = 1024;

	spdk_io_device_register(&g_bdev.io_target, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	spdk_bdev_register(&g_bdev.bdev);
}

static void
unregister_bdev(void)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&g_bdev.bdev);
	spdk_io_device_unregister(&g_bdev.io_target, NULL);
	memset(&g_bdev, 0, sizeof(g_bdev));
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

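/*
 * Common per-test setup: allocate the simulated threads, initialize the bdev
 * layer, register the stub bdev, and open a descriptor to it.  teardown_test()
 * unwinds all of this in reverse order.
 */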
199 setup_test(void)
200 {
201 	bool done = false;
202 
203 	allocate_threads(BDEV_UT_NUM_THREADS);
204 	spdk_bdev_initialize(bdev_init_cb, &done, NULL, NULL);
205 	register_bdev();
206 	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
207 }
208 
209 static void
210 teardown_test(void)
211 {
212 	spdk_bdev_close(g_desc);
213 	g_desc = NULL;
214 	unregister_bdev();
215 	spdk_bdev_finish();
216 	free_threads();
217 }
218 
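/*
 * Smoke test: get and put an I/O channel on a single thread to make sure the
 * setup/teardown plumbing itself works.
 */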
static void
basic(void)
{
	setup_test();

	set_thread(0);

	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

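/*
 * Verify that releasing the only I/O channel while a reset submitted on it is
 * still pending is handled cleanly once the reset's deferred messages run.
 */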
static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

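/*
 * Verify that a reset queued behind an in-progress reset is failed when its
 * channel is destroyed, while the original reset still completes normally.
 */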
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

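/*
 * Verify that I/O submitted while a reset is in progress is failed by the
 * generic bdev layer on every channel, and that I/O flows normally again
 * once the reset completes.
 */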
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_reset_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	set_thread(0);
	stub_complete_io(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

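/* Count the I/O currently queued on the given tailq. */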
static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}

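/*
 * Exercise the generic bdev layer's ENOMEM handling: I/O completed by the
 * module with a NOMEM status must be queued on the channel's nomem_io list
 * and only retried once enough completions bring the channel's outstanding
 * count down to nomem_threshold.
 */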
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->nomem_io));
	first_io = TAILQ_FIRST(&bdev_ch->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&bdev_ch->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&bdev_ch->nomem_io);
	CU_ASSERT(bdev_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(1);
	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&bdev_ch->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(1);
	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(0);

	CU_ASSERT(bdev_io_tailq_cnt(&bdev_ch->nomem_io) == 0);
	CU_ASSERT(bdev_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}