/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

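/*
 * Test fixture: ut_bdev wraps an spdk_bdev that routes I/O to a stub io_target,
 * and ut_bdev_channel tracks the I/O outstanding on each stub channel.
 */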
struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt gets to 0, the submit_request function will return ENOMEM.
	 *  Most tests do not want ENOMEM to occur, so by default set this to a
	 *  big value that won't get hit.  The ENOMEM tests can then override this
	 *  value to something much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

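/*
 * Stub submit path: a reset first fails every I/O outstanding on the channel,
 * then the request is queued on outstanding_io if avail_cnt allows it;
 * otherwise it is completed immediately with NOMEM status.
 */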
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

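/*
 * Complete up to num_to_complete I/O queued on the io_target's channel with
 * SUCCESS status; passing 0 completes everything that is outstanding.  Returns
 * the number of I/O actually completed.
 */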
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.init_complete = init_complete,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

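/*
 * Register a ut_bdev (4096-byte blocks, 1024 blocks) backed by the given
 * io_target with the bdev layer.
 */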
static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

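/*
 * Common test setup: allocate the test threads, initialize the bdev layer,
 * register the stub io_device and ut_bdev, and open a descriptor on it.
 * teardown_test() undoes all of this in reverse order.
 */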
234 setup_test(void)
235 {
236 	bool done = false;
237 
238 	allocate_threads(BDEV_UT_NUM_THREADS);
239 	spdk_bdev_initialize(bdev_init_cb, &done);
240 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
241 				sizeof(struct ut_bdev_channel));
242 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
243 	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
244 }
245 
246 static void
247 finish_cb(void *cb_arg)
248 {
249 	g_teardown_done = true;
250 }
251 
252 static void
253 teardown_test(void)
254 {
255 	g_teardown_done = false;
256 	spdk_bdev_close(g_desc);
257 	g_desc = NULL;
258 	unregister_bdev(&g_bdev);
259 	spdk_io_device_unregister(&g_io_device, NULL);
260 	spdk_bdev_finish(finish_cb, NULL);
261 	poll_threads();
262 	memset(&g_bdev, 0, sizeof(g_bdev));
263 	CU_ASSERT(g_teardown_done == true);
264 	g_teardown_done = false;
265 	free_threads();
266 }
267 
268 static uint32_t
269 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
270 {
271 	struct spdk_bdev_io *io;
272 	uint32_t cnt = 0;
273 
274 	TAILQ_FOREACH(io, tailq, internal.link) {
275 		cnt++;
276 	}
277 
278 	return cnt;
279 }
280 
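/*
 * Verify that the module's init_complete callback fires and that
 * spdk_bdev_get_io_channel() cleanly returns NULL when either the bdev's
 * get_io_channel callback or the io_device's channel create callback fails,
 * and succeeds otherwise.
 */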
static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

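/*
 * Submit a reset on each of two channels.  The second reset queues behind the
 * first; destroying its channel must abort only the queued reset, while the
 * original reset still completes successfully.
 */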
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->internal.reset_in_progress != NULL, since the
	 *  original reset has not been completed yet.  This ensures that
	 *  the bdev code is correctly noticing that the failed reset is
	 *  *not* the one that had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

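/*
 * Verify that reads submitted while no reset is pending complete normally, and
 * that reads submitted while a reset is in progress are failed immediately by
 * the bdev layer on every channel.
 */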
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

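/*
 * With both rate limits enabled, verify that I/O submitted on either channel
 * is serviced through the QoS channel (thread 0 here), and that closing and
 * reopening the descriptor re-establishes the QoS channel on whichever
 * channel is created first (thread 1 after the reopen).
 */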
static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, both rate limits will take equal effect.
	 */
	bdev->internal.qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos thread is now thread 1 */
	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}

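/*
 * With a 1 I/O-per-millisecond IOPS limit, submit two reads in the same
 * millisecond and verify that only one completes until simulated time is
 * advanced, at which point the queued I/O is released and completes too.
 */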
static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, IOPS rate limit will take effect first.
	 */
	bdev->internal.qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Only one of the I/O should complete. (logical XOR) */
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/* Advance in time by a millisecond */
	increment_time(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

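/*
 * With QoS enabled, submit two reads (one queued by QoS, one sitting at the
 * disk) and then reset the bdev.  The reset must complete successfully and
 * both outstanding reads must be failed.
 */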
static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, bandwidth rate limit will take effect first.
	 */
	bdev->internal.qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
	bdev->internal.qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

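/*
 * Drive the stub channel into ENOMEM: saturate it, queue additional I/O on
 * the shared resource's nomem_io list, and verify that retries only kick in
 * once completions cross nomem_threshold.  A reset must then flush
 * everything, including the I/O still queued on nomem_io.
 */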
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

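/*
 * Two bdevs sharing one io_target share a single shared_resource: saturating
 * the target through the first bdev must push the second bdev's I/O onto the
 * shared nomem_io list, and completions on the first bdev must retry it.
 */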
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	CU_ASSERT(shared_resource->io_outstanding == 1);

	/* Now complete our retried I/O */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

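/*
 * Bdevs built on different io_targets get different shared_resources, so
 * exhausting one target must not affect I/O submitted to the other.
 */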
static void
enomem_multi_io_target(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int new_io_device;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Create new io_target and a second bdev using it */
	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* Different io_target should imply a different shared_resource */
	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/* Issue one more I/O to fill ENOMEM list. */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should go through and complete
	 * successfully because we're using a different io_device underneath.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
	stub_complete_io(second_bdev->io_target, 1);

	/* Cleanup: complete the outstanding I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	/* Complete the ENOMEM I/O */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	spdk_io_device_unregister(&new_io_device, NULL);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *rc = cb_arg;
	*rc = status;
}

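/*
 * Exercise spdk_bdev_set_qos_limit_iops() at runtime: enabling, disabling,
 * disabling twice, and racing an enable against a still-pending disable, while
 * checking BDEV_CH_QOS_ENABLED on both channels after each transition.
 */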
static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	int status, second_status;

	setup_test();
	reset_time();

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/* Enable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Disable QoS again */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
	second_status = 0;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &second_status);
	poll_threads();
	CU_ASSERT(status == 0); /* The disable should succeed */
	CU_ASSERT(second_status < 0); /* The enable should fail */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}