/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

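/* Number of simulated threads allocated via ut_multithread.c for these tests. */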
#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

/* Return NULL to test hardcoded defaults. */
struct spdk_conf_section *
spdk_conf_find_section(struct spdk_conf *cp, const char *name)
{
	return NULL;
}

/* Return NULL to test hardcoded defaults. */
char *
spdk_conf_section_get_nmval(struct spdk_conf_section *sp, const char *key, int idx1, int idx2)
{
	return NULL;
}

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt reaches 0, the submit_request function will return ENOMEM.
	 *  Most tests do not want ENOMEM to occur, so by default set this to a
	 *  big value that won't get hit.  The ENOMEM tests can then override this
	 *  value to something much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

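/*
 * Queue the I/O on the channel, or complete it with NOMEM status once
 *  avail_cnt is exhausted.  A RESET first fails all queued I/O, mimicking
 *  how a real bdev module would abort outstanding requests during a reset.
 */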
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

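/*
 * Complete up to num_to_complete I/O queued on the given io_target's channel
 *  with SUCCESS status.  Passing 0 completes everything.  Returns the number
 *  of I/O actually completed.
 */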
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.init_complete = init_complete,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

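/* Register a stub bdev (4096-byte blocks, 1024 blocks) backed by the given io_target. */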
static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

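/*
 * Common setup: allocate the simulated threads, initialize the bdev layer,
 *  register the stub io_device and bdev, and open a descriptor to it.
 */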
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

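/* Count the number of I/O on a bdev I/O tailq. */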
static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}

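/*
 * Verify that init_complete fires during setup, and that a failure in either
 *  the module's get_io_channel or the channel create callback surfaces as a
 *  NULL channel from spdk_bdev_get_io_channel().
 */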
static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static int
poller_run_times_done(void *ctx)
{
	int	*poller_run_times = ctx;

	(*poller_run_times)++;

	return -1;
}

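/*
 * Exercise spdk_poller_register()/unregister() with zero and non-zero periods
 *  against the simulated clock (reset_time()/increment_time()) provided by the
 *  test environment.
 */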
static void
basic_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;
	int			poller_run_times = 0;

	setup_test();

	set_thread(0);
	reset_time();
	/* Register a poller with no wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	reset_time();
	/* Register a poller with 1000us wait time and test multiple execution */
	poller = spdk_poller_register(poller_run_times_done, &poller_run_times, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run_times == 0);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run_times == 1);

	poller_run_times = 0;
	increment_time(2000);
	poll_threads();
	CU_ASSERT(poller_run_times == 2);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

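/*
 * Queue a second reset behind an in-progress one, then destroy the queuing
 *  channel and verify that only the queued reset is aborted.
 */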
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

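/*
 * Verify that I/O submitted while a reset is pending is failed by the bdev
 *  layer on every channel, and that the reset itself only completes after
 *  all threads have been polled.
 */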
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

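/*
 * Verify that a QoS-enabled bdev funnels I/O through the QoS thread (thread 0
 *  here), and that the QoS channel is torn down when the descriptor closes and
 *  re-created when channels are acquired again.
 */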
static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	bdev->qos->rate_limit = 2000; /* 2 I/O per millisecond */

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	CU_ASSERT(bdev->qos->ch == NULL);

	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos tracking was re-enabled */
	CU_ASSERT(bdev->qos->ch != NULL);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}

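/*
 * With a 1 I/O per millisecond QoS limit, submit two I/O and verify that the
 *  second one is held in the QoS queue until simulated time advances.
 */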
static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	bdev->qos->rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Exactly one of the two I/O should complete (logical XOR). */
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/* Advance in time by a millisecond */
	increment_time(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

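/*
 * Verify that a reset completes successfully while QoS has one I/O queued and
 *  another outstanding at the disk, and that both I/O are failed by the reset.
 */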
static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->qos = calloc(1, sizeof(*bdev->qos));
	SPDK_CU_ASSERT_FATAL(bdev->qos != NULL);
	TAILQ_INIT(&bdev->qos->queued);
	bdev->qos->rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

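/*
 * Saturate the channel with AVAIL I/O, then verify that further submissions
 *  are queued on the shared resource's nomem_io list and only retried once
 *  enough completions bring outstanding I/O down to nomem_threshold.
 */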
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

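/*
 * Two bdevs on the same io_target share one spdk_bdev_shared_resource, so an
 *  ENOMEM hit through one bdev queues I/O that gets retried when the other
 *  bdev's I/O complete.
 */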
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register a second bdev with the same io_target */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	CU_ASSERT(shared_resource->io_outstanding == 1);

	/* Now complete our retried I/O */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

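/*
 * Bdevs on different io_targets get distinct shared resources, so ENOMEM on
 *  one io_target must not block I/O submitted to the other.
 */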
static void
enomem_multi_io_target(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int new_io_device;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Create a new io_target and a second bdev using it */
	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* A different io_target should imply a different shared_resource */
	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/* Issue one more I/O to fill the ENOMEM list. */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should go through and complete
	 * successfully because we're using a different io_device underneath.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
	stub_complete_io(second_bdev->io_target, 1);

	/* Cleanup: complete the outstanding I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	/* Complete the ENOMEM I/O */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	spdk_io_device_unregister(&new_io_device, NULL);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *rc = cb_arg;
	*rc = status;
}

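/*
 * Exercise enabling and disabling QoS at runtime via
 *  spdk_bdev_set_qos_limit_iops(), including back-to-back disables and an
 *  enable racing with a not-yet-completed disable.
 */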
static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	int status, second_status;

	setup_test();
	reset_time();

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/* Enable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Disable QoS again */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Enable QoS. This should immediately fail because the previous QoS disable hasn't completed. */
	second_status = 0;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &second_status);
	poll_threads();
	CU_ASSERT(status == 0); /* The disable should succeed */
	CU_ASSERT(second_status < 0); /* The enable should fail */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_poller", basic_poller) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}