/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt reaches 0, the submit_request function will complete
	 *  new I/O with SPDK_BDEV_IO_STATUS_NOMEM.  Most tests do not want
	 *  ENOMEM to occur, so by default set this to a big value that won't
	 *  get hit.  The ENOMEM tests can then override this value to something
	 *  much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

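/*
 * Submit path for the stub bdev module.  A RESET first fails all I/O that is
 *  currently queued on the channel.  Any other I/O is held on the channel's
 *  outstanding_io list (consuming one avail_cnt slot) until a test completes
 *  it explicitly via stub_complete_io().  When no slots remain, the I/O is
 *  completed immediately with NOMEM status.
 */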
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

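/*
 * Complete up to num_to_complete I/O queued on the io_target's channel with
 *  SUCCESS status, in submission order.  Passing 0 completes everything that
 *  is outstanding.  Returns the number of I/O actually completed.
 */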
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.init_complete = init_complete,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

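/*
 * Common setup for all tests: spawn BDEV_UT_NUM_THREADS ut threads,
 *  initialize the bdev layer, register g_io_device as the stub io_device,
 *  register g_bdev on top of it and open a descriptor (g_desc).
 *  teardown_test() below undoes all of this in reverse order.
 */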
234 setup_test(void)
235 {
236 	bool done = false;
237 
238 	allocate_threads(BDEV_UT_NUM_THREADS);
239 	spdk_bdev_initialize(bdev_init_cb, &done);
240 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
241 				sizeof(struct ut_bdev_channel));
242 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
243 	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
244 }
245 
246 static void
247 finish_cb(void *cb_arg)
248 {
249 	g_teardown_done = true;
250 }
251 
252 static void
253 teardown_test(void)
254 {
255 	g_teardown_done = false;
256 	spdk_bdev_close(g_desc);
257 	g_desc = NULL;
258 	unregister_bdev(&g_bdev);
259 	spdk_io_device_unregister(&g_io_device, NULL);
260 	spdk_bdev_finish(finish_cb, NULL);
261 	poll_threads();
262 	memset(&g_bdev, 0, sizeof(g_bdev));
263 	CU_ASSERT(g_teardown_done == true);
264 	g_teardown_done = false;
265 	free_threads();
266 }
267 
268 static uint32_t
269 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
270 {
271 	struct spdk_bdev_io *io;
272 	uint32_t cnt = 0;
273 
274 	TAILQ_FOREACH(io, tailq, link) {
275 		cnt++;
276 	}
277 
278 	return cnt;
279 }
280 
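/*
 * Verify basic setup/teardown plus the channel failure paths: getting an I/O
 *  channel fails cleanly when the module returns no channel or when channel
 *  creation fails, and succeeds in the normal case.
 */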
static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static int
poller_run_times_done(void *ctx)
{
	int	*poller_run_times = ctx;

	(*poller_run_times)++;

	return -1;
}

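/*
 * Exercise the ut framework's simulated pollers and clock: a poller with no
 *  period runs on every poll, while a timed poller only runs once enough
 *  simulated time has elapsed, and runs once per elapsed period.
 */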
static void
basic_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;
	int			poller_run_times = 0;

	setup_test();

	set_thread(0);
	reset_time();
	/* Register a poller with no wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	reset_time();
	/* Register a poller with 1000us wait time and test multiple executions */
	poller = spdk_poller_register(poller_run_times_done, &poller_run_times, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run_times == 0);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run_times == 1);

	poller_run_times = 0;
	increment_time(2000);
	poll_threads();
	CU_ASSERT(poller_run_times == 2);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

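/*
 * Verify that releasing an I/O channel immediately after submitting a reset
 *  on it is handled safely: the reset must still be delivered to the module
 *  and complete once the test finishes the outstanding I/O.
 */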
static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

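/*
 * Submit a reset on each of two channels so the second reset queues behind
 *  the first, then destroy the second channel.  The queued reset must be
 *  aborted (FAILED) without disturbing the reset already in progress.
 */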
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

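/*
 * Verify I/O behavior around a reset: I/O submitted with no reset pending
 *  completes normally on both channels, while I/O submitted after a reset
 *  has marked the channels with BDEV_CH_RESET_IN_PROGRESS is failed
 *  immediately by the bdev layer.
 */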
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

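/*
 * Verify basic QoS: with IOPS and bandwidth limits enabled, I/O submitted on
 *  any channel only completes via the QoS channel (thread 0 here), and after
 *  closing and reopening the descriptor with the channels recreated in
 *  reverse order, the QoS channel moves to thread 1.
 */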
static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, both rate limits will take equal effect.
	 */
	bdev->qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
	bdev->qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->qos->ch == NULL);

	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos thread is now thread 1 */
	CU_ASSERT(bdev->qos->ch == bdev_ch[1]);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}

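/*
 * With a 1 I/O-per-millisecond IOPS limit, submit two I/O at once and verify
 *  that only one completes immediately; the other stays queued by QoS until
 *  a millisecond of simulated time passes.
 */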
static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, IOPS rate limit will take effect first.
	 */
	bdev->qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
	bdev->qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Only one of the I/O should complete. (logical XOR) */
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/* Advance in time by a millisecond */
	increment_time(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

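/*
 * Queue I/O behind a QoS bandwidth limit, then reset the bdev: the reset
 *  must succeed while both the QoS-queued I/O and the I/O already at the
 *  disk are failed.
 */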
static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, bandwidth rate limit will take effect first.
	 */
	bdev->qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
	bdev->qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

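/*
 * Exercise NOMEM handling: saturate the channel, verify that additional I/O
 *  queues on the shared_resource's nomem_io list in submission order, that
 *  retries only trigger once enough completions cross nomem_threshold, and
 *  that a reset flushes everything.
 */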
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

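/*
 * Two bdevs on the same io_target share one shared_resource, so saturating
 *  the target through the first bdev causes the second bdev's I/O to queue
 *  on the shared nomem_io list and be retried once the first bdev's I/O
 *  completes.
 */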
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	CU_ASSERT(shared_resource->io_outstanding == 1);

	/* Now complete our retried I/O */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

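/*
 * Bdevs on different io_targets get separate shared_resources, so exhausting
 *  one target must not cause NOMEM queueing for I/O submitted to the other.
 */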
static void
enomem_multi_io_target(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int new_io_device;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Create new io_target and a second bdev using it */
	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* Different io_target should imply a different shared_resource */
	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/* Issue one more I/O to fill the ENOMEM list. */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should go through and complete
	 * successfully because we're using a different io_device underneath.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
	stub_complete_io(second_bdev->io_target, 1);

	/* Cleanup: complete all outstanding I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	/* Complete the ENOMEM I/O */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	spdk_io_device_unregister(&new_io_device, NULL);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *rc = cb_arg;
	*rc = status;
}

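/*
 * Toggle QoS at runtime via spdk_bdev_set_qos_limit_iops(): enabling and
 *  disabling must propagate to every channel's flags, disabling twice must
 *  still succeed, and an enable issued while a disable is still in flight
 *  must fail without corrupting the final state.
 */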
static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	int status, second_status;

	setup_test();
	reset_time();

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/* Enable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Disable QoS again */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
	second_status = 0;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &second_status);
	poll_threads();
	CU_ASSERT(status == 0); /* The disable should succeed */
	CU_ASSERT(second_status < 0); /* The enable should fail */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_poller", basic_poller) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}