/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "lib/test_env.c"
#include "lib/ut_multithread.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

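/*
 * bdev.c (included above) references spdk_scsi_nvme_translate(), so stub it
 *  out with an empty body rather than linking the SCSI translation code into
 *  this unit test.
 */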
DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

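/*
 * The stub bdev module below does not complete I/O when it is submitted.
 *  Instead, each I/O is queued on a per-channel outstanding_io list so that
 *  tests control exactly when, and how many, I/O complete by calling
 *  stub_complete_io().  avail_cnt caps the queue depth, letting tests
 *  provoke the bdev layer's ENOMEM handling.
 */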
struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt reaches 0, stub_submit_request() will complete new I/O
	 *  with SPDK_BDEV_IO_STATUS_NOMEM.  Most tests do not want ENOMEM to
	 *  occur, so by default set this to a big value that won't get hit.
	 *  The ENOMEM tests can then override this value to something much
	 *  smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

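/*
 * Submit path of the stub module.  A RESET first fails every queued I/O on
 *  this channel (mimicking a real device aborting outstanding commands) and
 *  is then itself queued like any other I/O, so a test must still finish it
 *  via stub_complete_io().  Non-reset I/O are queued while avail_cnt lasts;
 *  otherwise they are completed immediately with NOMEM status.
 */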
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

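/*
 * Complete up to num_to_complete queued I/O on io_target's channel with
 *  SUCCESS status; num_to_complete == 0 means "complete everything".
 *  A minimal usage sketch, mirroring the reset tests below:
 *
 *	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
 *	poll_threads();
 *	stub_complete_io(g_bdev.io_target, 0);	// completes the reset itself
 */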
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

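/*
 * Minimal bdev module definition: init/fini do nothing.  Registering it
 *  lets register_bdev() below attach bdevs (backed by the stub fn_table)
 *  to this module.
 */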
struct spdk_bdev_module_if bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
	memset(ut_bdev, 0, sizeof(*ut_bdev));
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

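/*
 * Common fixture: spin up BDEV_UT_NUM_THREADS mock threads, initialize the
 *  bdev layer, register one io_device plus one "ut_bdev" on top of it, and
 *  open a descriptor (g_desc).  teardown_test() undoes this in reverse.
 */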
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

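/*
 * Exercise the channel creation failure paths first (get_io_channel
 *  returning NULL, then the channel create callback failing) before
 *  verifying the normal get/put cycle.
 */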
static void
basic(void)
{
	setup_test();

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static void
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;
}

static void
poller_run_times_done(void *ctx)
{
	int	*poller_run_times = ctx;

	(*poller_run_times)++;
}

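/*
 * Verify poller semantics using the mock time helpers (reset_time() and
 *  increment_time()): a 0us poller runs on every poll_threads(), while a
 *  1000us poller runs only after the mock clock advances far enough, and
 *  runs once per elapsed period.
 */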
static void
basic_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;
	int			poller_run_times = 0;

	setup_test();

	set_thread(0);
	reset_time();
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	reset_time();
	/* Register a poller with 1000us wait time and test multiple execution */
	poller = spdk_poller_register(poller_run_times_done, &poller_run_times, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run_times == 0);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run_times == 1);

	poller_run_times = 0;
	increment_time(2000);
	poll_threads();
	CU_ASSERT(poller_run_times == 2);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_reset_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_reset_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_reset_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress, so these read I/O should complete with failure.  Note
	 *  that we need to poll_threads() since I/O that complete inline have their
	 *  completions deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}

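/*
 * Verify the bdev layer's ENOMEM handling: once the stub channel is
 *  saturated (avail_cnt slots in use), further I/O get NOMEM status and are
 *  queued on the shared module channel's nomem_io list, to be retried only
 *  after outstanding I/O drops to the nomem_threshold.
 */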
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go
	 *  onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));
	first_io = TAILQ_FIRST(&module_ch->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&module_ch->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);
	CU_ASSERT(module_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying
	 *  nomem_io, and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == 0);
	CU_ASSERT(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

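/*
 * Two bdevs registered on the same io_target share one module channel, so
 *  ENOMEM backpressure created by saturating the first bdev also queues the
 *  second bdev's I/O, and completions on the first bdev release it.
 */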
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(module_ch == second_bdev_ch->module_ch);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));

	/* Complete the first bdev's I/O.  This should trigger retrying the second bdev's queued nomem_io. */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&module_ch->nomem_io));
	CU_ASSERT(module_ch->io_outstanding == 1);

	/* Now complete our retried I/O */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	free(second_bdev);
	poll_threads();
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_poller", basic_poller) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}