/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

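/*
 * Include bdev.c directly (rather than linking against it) so the tests can
 * inspect internal state such as struct spdk_bdev_channel, the nomem_io
 * queue, and the QoS channel.
 */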
#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

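/*
 * bdev.c references spdk_scsi_nvme_translate() when translating NVMe status
 * codes to SCSI sense data; none of these tests exercise that path, so an
 * empty stub is sufficient.
 */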
DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

/* Return NULL to test hardcoded defaults. */
struct spdk_conf_section *
spdk_conf_find_section(struct spdk_conf *cp, const char *name)
{
	return NULL;
}

/* Return NULL to test hardcoded defaults. */
char *
spdk_conf_section_get_nmval(struct spdk_conf_section *sp, const char *key, int idx1, int idx2)
{
	return NULL;
}

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

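/*
 * Per-channel state for the stub backend: outstanding_io holds the I/O that
 * the stub has accepted but not yet completed, outstanding_cnt tracks its
 * length, and avail_cnt is the remaining queue depth before new submissions
 * are failed with SPDK_BDEV_IO_STATUS_NOMEM.
 */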
struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt reaches 0, the submit_request function will complete
	 *  new I/O with SPDK_BDEV_IO_STATUS_NOMEM.  Most tests do not want
	 *  ENOMEM to occur, so by default set this to a big value that won't
	 *  get hit.  The ENOMEM tests can then override this value to something
	 *  much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}
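
/*
 * Sketch of how the enomem tests below induce NOMEM completions: fetch the
 * stub channel's context and shrink avail_cnt before submitting I/O:
 *
 *	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
 *	ut_ch->avail_cnt = 20;
 */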

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		/* A reset aborts all I/O outstanding on this channel by
		 *  completing it with FAILED status.
		 */
		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	/* Queue the I/O (including the reset itself) until a test completes
	 *  it via stub_complete_io(), or fail it with NOMEM if the channel
	 *  is already at its queue depth.
	 */
	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}
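
/*
 * Usage note: stub_complete_io() operates on the current thread's channel.
 * stub_complete_io(g_bdev.io_target, 1) completes exactly one outstanding
 * I/O, while stub_complete_io(g_bdev.io_target, 0) drains everything that
 * is outstanding there.
 */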

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}
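
/*
 * Every test below follows the same skeleton:
 *
 *	setup_test();		allocate 3 threads, register ut_bdev, open g_desc
 *	set_thread(0);		pick which thread subsequent calls run on
 *	...			exercise the bdev layer, calling poll_threads()
 *				to flush deferred messages and completions
 *	teardown_test();	close g_desc, unregister, free the threads
 */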

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}
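
/*
 * bdev_io_tailq_cnt() is used below to inspect internal bdev queues, such as
 * module_ch->nomem_io in the enomem tests and qos_bdev_ch->qos_io in the QoS
 * tests, without draining them.
 */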

static void
basic(void)
{
	setup_test();

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static int
poller_run_times_done(void *ctx)
{
	int	*poller_run_times = ctx;

	(*poller_run_times)++;

	return -1;
}

static void
basic_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;
	int			poller_run_times = 0;

	setup_test();

	set_thread(0);
	reset_time();
	/* Register a poller with a zero wait time and test that it executes */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with a 1000us wait time and test a single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	reset_time();
	/* Register a poller with a 1000us wait time and test multiple executions */
	poller = spdk_poller_register(poller_run_times_done, &poller_run_times, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run_times == 0);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run_times == 1);

	poller_run_times = 0;
	increment_time(2000);
	poll_threads();
	CU_ASSERT(poller_run_times == 2);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completions deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset.
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0.  We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 *  get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on the channel (QoS not enabled)
	 *  and verify it completes successfully.
	 */
	set_thread(0);
	g_get_io_channel = false;
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] == NULL);
	g_get_io_channel = true;
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(1);
	bdev = &g_bdev.bdev;
	bdev->ios_per_sec = 2000;
	g_get_io_channel = false;
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] == NULL);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	qos_bdev_ch = bdev->qos_channel;
	CU_ASSERT(qos_bdev_ch == NULL);
	g_get_io_channel = true;
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);
	CU_ASSERT(g_ut_threads[1].thread == bdev->qos_thread);

	/*
	 * Now send one I/O on the first channel.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 1);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * The I/O is serviced on thread 1 by the QoS thread, so complete
	 *  it there.
	 */
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 1);

	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * The QoS thread is on thread 1.  Put the I/O channel on thread 1
	 *  first to trigger an async destruction of the QoS bdev channel.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[0]);
	set_thread(0);
	spdk_put_io_channel(io_ch[1]);

	/*
	 * Handle the messages on thread 1 first so that the QoS bdev
	 *  channel destroy message sent from thread 0 gets processed
	 *  there.
	 */
	poll_thread(1);
	poll_thread(0);

	/*
	 * Create a new I/O channel while the async destruction of the QoS
	 *  bdev channel is ongoing.  The expected result is that the QoS
	 *  bdev channel will be properly set up again.
	 */
	set_thread(2);
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);

	poll_threads();

	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);
	CU_ASSERT(g_ut_threads[2].thread == bdev->qos_thread);

	/*
	 * Destroy the last I/O channel so that the QoS bdev channel
	 *  will be destroyed.
	 */
	set_thread(2);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	teardown_test();
}

static void
io_during_qos(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (QoS not enabled)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(2);
	bdev = &g_bdev.bdev;
	/*
	 * 10 I/Os allowed per millisecond.
	 */
	bdev->ios_per_sec = 10000;
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);

	/*
	 * Now send some I/Os on different channels with QoS enabled.
	 */
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 2);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * The I/Os are serviced on thread 2 by the QoS thread, so complete
	 *  them there.
	 */
	set_thread(2);
	stub_complete_io(g_bdev.io_target, 2);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(2);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	teardown_test();
}

static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();
	reset_time();

	/*
	 * First test normal case - submit an I/O on each of two channels (QoS not enabled)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(2);
	bdev = bdev_ch[0]->bdev;
	/*
	 * Only 1 I/O allowed per millisecond.  Additional I/Os will be queued.
	 */
	bdev->ios_per_sec = 1000;
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);

	/*
	 * Now send some I/Os on different channels with QoS enabled.
	 */
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * Poll the QoS thread to send the allowed I/O down.
	 */
	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 1);
	CU_ASSERT(bdev_io_tailq_cnt(&qos_bdev_ch->qos_io) == 1);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Advance the time and poll the QoS thread so that its periodic
	 *  poller sends the queued I/O down.
	 */
	increment_time(1000);
	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 2);
	CU_ASSERT(bdev_io_tailq_cnt(&qos_bdev_ch->qos_io) == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * The I/Os are handled on thread 2, which hosts the QoS channel,
	 *  so complete them there.
	 */
	set_thread(2);
	stub_complete_io(g_bdev.io_target, 0);
	spdk_put_io_channel(io_ch[0]);
	spdk_put_io_channel(io_ch[1]);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	teardown_test();
}

static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, status_reset;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (QoS disabled and no reset)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Enable QoS on the bdev.
	 */
	set_thread(2);
	bdev = bdev_ch[0]->bdev;
	bdev->ios_per_sec = 2000;
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch != NULL);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	CU_ASSERT(bdev_ch[2]->flags == 0);
	CU_ASSERT(qos_bdev_ch->flags & BDEV_CH_QOS_ENABLED);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[2]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(qos_bdev_ch->flags & BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress, so these read I/O should complete with failure even though
	 *  QoS has been enabled.  Note that we need to poll_threads() since I/O completed
	 *  inline have their completions deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	spdk_put_io_channel(io_ch[1]);
	set_thread(2);
	stub_complete_io(g_bdev.io_target, 0);
	spdk_put_io_channel(io_ch[2]);
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));
	first_io = TAILQ_FIRST(&module_ch->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&module_ch->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);
	CU_ASSERT(module_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == 0);
	CU_ASSERT(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register a second bdev with the same io_target. */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(module_ch == second_bdev_ch->module_ch);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Now submit I/O through the second bdev.  This should fail with ENOMEM
	 *  and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));

	/* Complete first bdev's I/O.  This should retry sending second bdev's nomem_io. */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&module_ch->nomem_io));
	CU_ASSERT(module_ch->io_outstanding == 1);

	/* Now complete our retried I/O. */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_poller", basic_poller) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos", io_during_qos) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}