/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

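/*
 * The bdev implementation is compiled directly into this test (see the include
 * below) so the tests can inspect internal state, such as bdev->internal.qos
 * and the per-channel shared_resource, that the public API does not expose.
 */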
#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;

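/*
 * The stub bdev module below models a backing device as a per-channel queue of
 * outstanding I/O with an avail_cnt budget.  Tests drive it synchronously:
 * stub_submit_request() queues (or fails) I/O, and stub_complete_io() completes
 * queued I/O on demand.
 */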
static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt reaches 0, the submit_request function will start
	 *  completing I/O with NOMEM status.  Most tests do not want ENOMEM
	 *  to occur, so by default set this to a big value that won't get
	 *  hit.  The ENOMEM tests can then override this value to something
	 *  much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

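/*
 * Queue the I/O and charge it against avail_cnt, or complete it with NOMEM
 *  status once the budget is exhausted.  A reset first fails every I/O already
 *  outstanding on the channel, mirroring how a real bdev module would abort
 *  in-flight requests, and then the reset itself is queued like any other I/O.
 */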
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

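/*
 * Complete up to num_to_complete queued I/O with SUCCESS status, refunding
 *  avail_cnt for each one; num_to_complete == 0 means complete everything.
 *  This operates on the calling thread's channel, so tests must set_thread()
 *  to the thread whose I/O they want completed.
 */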
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.init_complete = init_complete,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

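/*
 * Each test runs on BDEV_UT_NUM_THREADS simulated threads provided by
 * ut_multithread.c: set_thread() selects the current thread and poll_threads()
 * drains every thread's message queue, so cross-thread message passing inside
 * the bdev layer only makes progress when a test explicitly polls.
 */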
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, internal.link) {
		cnt++;
	}

	return cnt;
}

static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

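/*
 * ut_multithread.c also virtualizes time: reset_time() zeroes the fake clock
 * and increment_time() advances it (in microseconds here), so timed pollers
 * fire only when a test explicitly moves the clock forward.
 */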
static void
basic_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	setup_test();

	set_thread(0);
	reset_time();
	/* Register a poller with a zero wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with a 1000us period and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->internal.reset_in_progress != NULL, since the
	 *  original reset has not been completed yet.  This ensures that
	 *  the bdev code is correctly noticing that the failed reset is
	 *  *not* the one that had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that
	 *  we need to poll_threads() since I/O that are completed inline have their
	 *  completions deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, both rate limits will take equal effect.
	 */
	bdev->internal.qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */
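	/*
	 * Worked out: 2000 I/O per second is 2 per millisecond, and 8192000
	 *  bytes per second is 8192 bytes per millisecond, i.e. two 4096-byte
	 *  blocks, so both limits allow exactly 2 I/O per millisecond.
	 */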

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos thread is now thread 1 */
	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}

static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, IOPS rate limit will take effect first.
	 */
	bdev->internal.qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */
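	/*
	 * Worked out: 1000 I/O per second allows 1 I/O per millisecond, while
	 *  8192000 bytes per second allows two 4096-byte blocks per millisecond,
	 *  so the IOPS limit is the tighter of the two and gates the second I/O
	 *  until the clock advances.
	 */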

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Only one of the I/O should complete. (logical XOR) */
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/* Advance in time by a millisecond */
	increment_time(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, bandwidth rate limit will take effect first.
	 */
	bdev->internal.qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
	bdev->internal.qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */
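	/*
	 * Worked out: 2000 I/O per second allows 2 I/O per millisecond, but
	 *  4096000 bytes per second allows only one 4096-byte block per
	 *  millisecond, so here the bandwidth limit is the tighter of the two.
	 */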

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

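/*
 * When a bdev module completes an I/O with NOMEM status, the bdev layer parks
 * it on the shared_resource->nomem_io list and retries it once outstanding I/O
 * drains down to nomem_threshold.  The three tests below exercise that retry
 * path for a single bdev, for two bdevs sharing one io_target, and for bdevs
 * on separate io_targets.
 */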
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register a second bdev with the same io_target. */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	CU_ASSERT(shared_resource->io_outstanding == 1);

	/* Now complete our retried I/O. */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
enomem_multi_io_target(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int new_io_device;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Create a new io_target and a second bdev using it */
	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* A different io_target should imply a different shared_resource */
	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/* Issue one more I/O to put an entry on the nomem_io list. */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should go through and complete
	 * successfully because we're using a different io_device underneath.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
	stub_complete_io(second_bdev->io_target, 1);

	/* Cleanup: complete the outstanding I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	/* Complete the ENOMEM I/O */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	spdk_io_device_unregister(&new_io_device, NULL);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *rc = cb_arg;
	*rc = status;
}

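/*
 * QoS can also be toggled at runtime with spdk_bdev_set_qos_limit_iops(): a
 * nonzero limit enables rate limiting on every channel, zero disables it, and
 * the callback fires only after the change has propagated to all channels
 * (hence the poll_threads() calls below).
 */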
static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	int status, second_status;

	setup_test();
	reset_time();

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/* Enable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Disable QoS again */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
	second_status = 0;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &second_status);
	poll_threads();
	CU_ASSERT(status == 0); /* The disable should succeed */
	CU_ASSERT(second_status < 0); /* The enable should fail */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_poller", basic_poller) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}