/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "lib/test_env.c"
#include "lib/ut_multithread.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev.c"

#define BDEV_UT_NUM_THREADS 3

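/* Stub out spdk_scsi_nvme_translate(), which the included bdev.c references
 *  but these tests never exercise. */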
DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

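/*
 * Global test state shared by all test cases: one io_device, one registered
 *  bdev, an open descriptor to it, and a flag indicating that
 *  spdk_bdev_finish() has completed during teardown.
 */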
int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt reaches 0, the submit_request function completes the
	 *  I/O with SPDK_BDEV_IO_STATUS_NOMEM.  Most tests do not want ENOMEM
	 *  to occur, so by default set this to a big value that won't get hit.
	 *  The ENOMEM tests can then override this value to something much
	 *  smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	return spdk_get_io_channel(ut_bdev->io_target);
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

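/*
 * Queue the I/O on the channel if capacity remains; otherwise complete it
 *  immediately with NOMEM status.  A reset first fails any I/O already
 *  outstanding on this channel.
 */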
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

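/*
 * Complete outstanding I/O on the current thread's channel with SUCCESS,
 *  oldest first.  Passing num_to_complete == 0 completes everything that is
 *  outstanding; the return value is the number of I/O actually completed.
 */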
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

SPDK_BDEV_MODULE_REGISTER(bdev_ut, module_init, module_fini, NULL, NULL, NULL)

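/*
 * Register a ut_bdev backed by the given io_target.  The geometry (4096-byte
 *  blocks, 1024 blocks) is arbitrary, since these tests never transfer real
 *  data.
 */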
static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = SPDK_GET_BDEV_MODULE(bdev_ut);
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
	memset(ut_bdev, 0, sizeof(*ut_bdev));
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

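/*
 * Common fixture: allocate the test threads, initialize the bdev layer,
 *  register the stub io_device plus one bdev, and open a descriptor to it.
 */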
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

static void
basic(void)
{
	setup_test();

	set_thread(0);

	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

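/*
 * Submit a second reset while the first is still outstanding, then destroy
 *  the channel that owns the queued reset.  The queued reset must fail
 *  without disturbing the original reset.
 */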
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

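/*
 * While a reset is outstanding, reads submitted on any channel of the bdev
 *  are failed by the bdev layer without reaching the module, and each
 *  channel has BDEV_CH_RESET_IN_PROGRESS set.
 */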
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_reset_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress, so these read I/O should complete with failure.  Note that
	 *  we need to poll_threads() since I/O that are completed inline have their
	 *  completions deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}

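/*
 * Exercise the NOMEM path: cap the channel at AVAIL outstanding I/O, saturate
 *  it, and verify that queued I/O are retried only after enough completions
 *  occur to cross nomem_threshold.
 */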
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));
	first_io = TAILQ_FIRST(&module_ch->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&module_ch->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);
	CU_ASSERT(module_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == 0);
	CU_ASSERT(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

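/*
 * Two bdevs sharing one io_target also share a module channel, so NOMEM
 *  queueing must work across bdevs: I/O submitted through the second bdev
 *  while the first has the target saturated should be queued and then
 *  retried once the target drains.
 */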
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target. */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(module_ch == second_bdev_ch->module_ch);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io. */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&module_ch->nomem_io));
	CU_ASSERT(module_ch->io_outstanding == 1);

	/* Now complete our retried I/O. */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	free(second_bdev);
	poll_threads();
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}