/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

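/*
 * Stub bdev module state used by these tests: ut_bdev wraps an spdk_bdev plus
 * the io_target it routes I/O to, while ut_bdev_channel tracks the I/O the
 * stub is currently holding so tests can complete or fail it explicitly.
 */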
struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;

76 stub_create_ch(void *io_device, void *ctx_buf)
77 {
78 	struct ut_bdev_channel *ch = ctx_buf;
79 
80 	if (g_create_ch == false) {
81 		return -1;
82 	}
83 
84 	TAILQ_INIT(&ch->outstanding_io);
85 	ch->outstanding_cnt = 0;
86 	/*
87 	 * When avail gets to 0, the submit_request function will return ENOMEM.
88 	 *  Most tests to not want ENOMEM to occur, so by default set this to a
89 	 *  big value that won't get hit.  The ENOMEM tests can then override this
90 	 *  value to something much smaller to induce ENOMEM conditions.
91 	 */
92 	ch->avail_cnt = 2048;
93 	return 0;
94 }
95 
static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

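/*
 * Stub submit path: a RESET first fails every I/O currently held on the
 * channel.  Any submitted I/O is then queued on outstanding_io while
 * avail_cnt permits; once avail_cnt is exhausted the I/O is completed
 * immediately with NOMEM status.
 */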
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

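/*
 * Complete up to num_to_complete I/O held by the stub for the given io_target
 * (0 means complete everything) with SUCCESS status, returning the number of
 * I/O actually completed.
 */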
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.init_complete = init_complete,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

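/* Register a stub ut_bdev (4096-byte blocks, 1024 blocks) backed by the given io_target. */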
static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

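/*
 * Common test setup: allocate the unit test threads, initialize the bdev
 * layer, register the stub io_device and "ut_bdev", and open a descriptor
 * to it in g_desc.  teardown_test() below undoes all of this.
 */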
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, internal.link) {
		cnt++;
	}

	return cnt;
}

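/*
 * Basic sanity test: verify init_complete fires, that channel creation fails
 * cleanly when either the module's get_io_channel or the channel create
 * callback fails, and that a channel can be obtained and released when
 * everything succeeds.
 */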
static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

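/*
 * Verify that putting an I/O channel while a reset submitted on it is still
 * working its way through deferred messages is handled cleanly.
 */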
static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

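/*
 * Queue a second reset behind an in-progress reset, then destroy the channel
 * that queued it.  The queued reset must be aborted (failed) without clearing
 * the bdev's reset_in_progress tracking for the original reset.
 */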
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
				 status2 = SPDK_BDEV_IO_STATUS_PENDING;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->internal.reset_in_progress != NULL, since the
	 *  original reset has not been completed yet.  This ensures that
	 *  the bdev code is correctly noticing that the failed reset is
	 *  *not* the one that had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

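/*
 * Verify that I/O submitted on any channel while a reset is in progress is
 * failed by the bdev layer, and that the reset itself only completes after
 * all threads have been polled.
 */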
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

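/*
 * Exercise the QoS path with both an IOPS and a bandwidth rate limit enabled:
 * I/O submitted on any thread must be funneled through the QoS channel
 * (initially on thread 0), and after the descriptor is reopened with channels
 * created in reverse order the QoS channel should move to thread 1.
 */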
static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, both rate limits will take equal effect.
	 */
	bdev->internal.qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos thread is now thread 1 */
	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}

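/*
 * With a 1 I/O per millisecond IOPS limit, two I/O submitted in the same
 * timeslice should complete one at a time: the second stays queued by QoS
 * until the clock is advanced by a millisecond.
 */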
static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, IOPS rate limit will take effect first.
	 */
	bdev->internal.qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Only one of the I/O should complete. (logical XOR) */
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/* Advance in time by a millisecond */
	increment_time(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

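/*
 * Submit I/O that is either queued by the QoS bandwidth limit or sitting at
 * the disk, then reset the bdev: the reset must succeed and both I/O must be
 * failed, regardless of where they were waiting.
 */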
static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, bandwidth rate limit will take effect first.
	 */
	bdev->internal.qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
	bdev->internal.qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

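/*
 * Drive the stub into ENOMEM: saturate the channel, confirm extra I/O lands
 * on the shared nomem_io list in submission order, and verify that retries
 * only kick in once completions reach the nomem_threshold.  A reset at the
 * end must drain everything, including the queued nomem I/O.
 */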
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

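/*
 * Two bdevs sharing the same io_target also share a spdk_bdev_shared_resource,
 * so ENOMEM caused by one bdev must queue, and later retry, I/O submitted
 * through the other bdev.
 */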
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	CU_ASSERT(shared_resource->io_outstanding == 1);

	/* Now complete our retried I/O */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}


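/*
 * Bdevs built on different io_targets get distinct shared_resources, so
 * ENOMEM on the first bdev must not block I/O submitted through the second.
 */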
static void
enomem_multi_io_target(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int new_io_device;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Create new io_target and a second bdev using it */
	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* Different io_target should imply a different shared_resource */
	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/* Issue one more I/O to fill ENOMEM list. */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should go through and complete
	 * successfully because we're using a different io_device underneath.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
	stub_complete_io(second_bdev->io_target, 1);

	/* Cleanup; complete the outstanding I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	/* Complete the ENOMEM I/O */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	spdk_io_device_unregister(&new_io_device, NULL);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *rc = cb_arg;
	*rc = status;
}

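/*
 * Toggle QoS on and off at runtime with spdk_bdev_set_qos_limit_iops():
 * disabling QoS must resubmit (not abort) I/O that QoS had queued, back on
 * the thread that originally issued it, and an enable racing with an
 * in-flight disable must fail cleanly.
 */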
static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status bdev_io_status[2];
	int status, second_status, rc, i;

	setup_test();
	reset_time();

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/* Enable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/*
	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
	 * Additional I/O will then be queued.
	 */
	set_thread(0);
	for (i = 0; i < 10; i++) {
		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
		CU_ASSERT(rc == 0);
		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
		poll_thread(0);
		stub_complete_io(g_bdev.io_target, 0);
		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/*
	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
	 * filled already.  We want to test that when QoS is disabled, these two I/O:
	 *  1) are not aborted
	 *  2) are sent back to their original thread for resubmission
	 */
	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(1);
	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();

	/* Disable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/*
	 * All I/O should have been resubmitted back on their original thread.  Complete
	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);

	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Disable QoS again */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
	second_status = 0;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &second_status);
	poll_threads();
	CU_ASSERT(status == 0); /* The disable should succeed */
	CU_ASSERT(second_status < 0); /* The enable should fail */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}