xref: /spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c (revision c899854d0371a7cdb3e2fd8c07ccf3d1f0b8089a)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/test_env.c"
37 #include "common/lib/ut_multithread.c"
38 #include "unit/lib/json_mock.c"
39 
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
45 #define BDEV_UT_NUM_THREADS 3
46 
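/*
 * Stub out external functions that bdev.c references but that these tests do
 *  not exercise.  DEFINE_STUB() supplies a fixed return value; DEFINE_STUB_V()
 *  stubs a void function.
 */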
47 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
48 		int *sc, int *sk, int *asc, int *ascq));
49 
50 DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
51 		const char *name), NULL);
52 DEFINE_STUB(spdk_conf_section_get_nmval, char *,
53 	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
54 DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
55 
56 struct ut_bdev {
57 	struct spdk_bdev	bdev;
58 	void			*io_target;
59 };
60 
61 struct ut_bdev_channel {
62 	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
63 	uint32_t			outstanding_cnt;
64 	uint32_t			avail_cnt;
65 };
66 
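/*
 * Global test state.  g_get_io_channel and g_create_ch let individual tests
 *  force I/O channel creation failures; g_init_complete_called and
 *  g_fini_start_called record that the module callbacks were invoked.
 */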
67 int g_io_device;
68 struct ut_bdev g_bdev;
69 struct spdk_bdev_desc *g_desc;
70 bool g_teardown_done = false;
71 bool g_get_io_channel = true;
72 bool g_create_ch = true;
73 bool g_init_complete_called = false;
74 bool g_fini_start_called = true;
75 
76 static int
77 stub_create_ch(void *io_device, void *ctx_buf)
78 {
79 	struct ut_bdev_channel *ch = ctx_buf;
80 
81 	if (g_create_ch == false) {
82 		return -1;
83 	}
84 
85 	TAILQ_INIT(&ch->outstanding_io);
86 	ch->outstanding_cnt = 0;
87 	/*
88 	 * When avail gets to 0, the submit_request function will complete new I/O with ENOMEM.
89 	 *  Most tests do not want ENOMEM to occur, so by default set this to a
90 	 *  big value that won't get hit.  The ENOMEM tests can then override this
91 	 *  value to something much smaller to induce ENOMEM conditions.
92 	 */
93 	ch->avail_cnt = 2048;
94 	return 0;
95 }
96 
97 static void
98 stub_destroy_ch(void *io_device, void *ctx_buf)
99 {
100 }
101 
102 static struct spdk_io_channel *
103 stub_get_io_channel(void *ctx)
104 {
105 	struct ut_bdev *ut_bdev = ctx;
106 
107 	if (g_get_io_channel == true) {
108 		return spdk_get_io_channel(ut_bdev->io_target);
109 	} else {
110 		return NULL;
111 	}
112 }
113 
114 static int
115 stub_destruct(void *ctx)
116 {
117 	return 0;
118 }
119 
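/*
 * Queue an I/O on the channel's outstanding list if capacity remains;
 *  otherwise complete it with ENOMEM.  A reset first fails all I/O that is
 *  currently outstanding on the channel.
 */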
120 static void
121 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
122 {
123 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
124 
125 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
126 		struct spdk_bdev_io *io;
127 
128 		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
129 			io = TAILQ_FIRST(&ch->outstanding_io);
130 			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
131 			ch->outstanding_cnt--;
132 			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
133 			ch->avail_cnt++;
134 		}
135 	}
136 
137 	if (ch->avail_cnt > 0) {
138 		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
139 		ch->outstanding_cnt++;
140 		ch->avail_cnt--;
141 	} else {
142 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
143 	}
144 }
145 
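/*
 * Complete up to num_to_complete I/O outstanding on io_target with SUCCESS
 *  status and return the number completed.  Passing 0 completes everything.
 */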
146 static uint32_t
147 stub_complete_io(void *io_target, uint32_t num_to_complete)
148 {
149 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
150 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
151 	struct spdk_bdev_io *io;
152 	bool complete_all = (num_to_complete == 0);
153 	uint32_t num_completed = 0;
154 
155 	while (complete_all || num_completed < num_to_complete) {
156 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
157 			break;
158 		}
159 		io = TAILQ_FIRST(&ch->outstanding_io);
160 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
161 		ch->outstanding_cnt--;
162 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
163 		ch->avail_cnt++;
164 		num_completed++;
165 	}
166 
167 	spdk_put_io_channel(_ch);
168 	return num_completed;
169 }
170 
171 static struct spdk_bdev_fn_table fn_table = {
172 	.get_io_channel =	stub_get_io_channel,
173 	.destruct =		stub_destruct,
174 	.submit_request =	stub_submit_request,
175 };
176 
177 static int
178 module_init(void)
179 {
180 	return 0;
181 }
182 
183 static void
184 module_fini(void)
185 {
186 }
187 
188 static void
189 init_complete(void)
190 {
191 	g_init_complete_called = true;
192 }
193 
194 static void
195 fini_start(void)
196 {
197 	g_fini_start_called = true;
198 }
199 
200 struct spdk_bdev_module bdev_ut_if = {
201 	.name = "bdev_ut",
202 	.module_init = module_init,
203 	.module_fini = module_fini,
204 	.init_complete = init_complete,
205 	.fini_start = fini_start,
206 };
207 
208 SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
209 
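/*
 * Register a test bdev backed by the given io_target.  The bdev reports
 *  1024 blocks of 4096 bytes and uses the stub function table above.
 */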
210 static void
211 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
212 {
213 	memset(ut_bdev, 0, sizeof(*ut_bdev));
214 
215 	ut_bdev->io_target = io_target;
216 	ut_bdev->bdev.ctxt = ut_bdev;
217 	ut_bdev->bdev.name = name;
218 	ut_bdev->bdev.fn_table = &fn_table;
219 	ut_bdev->bdev.module = &bdev_ut_if;
220 	ut_bdev->bdev.blocklen = 4096;
221 	ut_bdev->bdev.blockcnt = 1024;
222 
223 	spdk_bdev_register(&ut_bdev->bdev);
224 }
225 
226 static void
227 unregister_bdev(struct ut_bdev *ut_bdev)
228 {
229 	/* Handle any deferred messages. */
230 	poll_threads();
231 	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
232 }
233 
234 static void
235 bdev_init_cb(void *done, int rc)
236 {
237 	CU_ASSERT(rc == 0);
238 	*(bool *)done = true;
239 }
240 
241 static void
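/*
 * Common test setup: create the test threads, initialize the bdev layer,
 *  register the ut_bdev io_device and bdev, and open a descriptor to it.
 */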
242 setup_test(void)
243 {
244 	bool done = false;
245 
246 	allocate_threads(BDEV_UT_NUM_THREADS);
247 	spdk_bdev_initialize(bdev_init_cb, &done);
248 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
249 				sizeof(struct ut_bdev_channel));
250 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
251 	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
252 }
253 
254 static void
255 finish_cb(void *cb_arg)
256 {
257 	g_teardown_done = true;
258 }
259 
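/*
 * Common test teardown: close the descriptor, unregister the bdev and
 *  io_device, finish the bdev layer, and free the test threads.
 */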
260 static void
261 teardown_test(void)
262 {
263 	g_teardown_done = false;
264 	spdk_bdev_close(g_desc);
265 	g_desc = NULL;
266 	unregister_bdev(&g_bdev);
267 	spdk_io_device_unregister(&g_io_device, NULL);
268 	spdk_bdev_finish(finish_cb, NULL);
269 	poll_threads();
270 	memset(&g_bdev, 0, sizeof(g_bdev));
271 	CU_ASSERT(g_teardown_done == true);
272 	g_teardown_done = false;
273 	free_threads();
274 }
275 
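/* Count the number of bdev_io entries currently on a tailq. */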
276 static uint32_t
277 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
278 {
279 	struct spdk_bdev_io *io;
280 	uint32_t cnt = 0;
281 
282 	TAILQ_FOREACH(io, tailq, internal.link) {
283 		cnt++;
284 	}
285 
286 	return cnt;
287 }
288 
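/*
 * Verify that init_complete and fini_start are called, and that getting an
 *  I/O channel fails cleanly when either the bdev module or the io_device
 *  cannot provide one.
 */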
289 static void
290 basic(void)
291 {
292 	g_init_complete_called = false;
293 	setup_test();
294 	CU_ASSERT(g_init_complete_called == true);
295 
296 	set_thread(0);
297 
298 	g_get_io_channel = false;
299 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
300 	CU_ASSERT(g_ut_threads[0].ch == NULL);
301 
302 	g_get_io_channel = true;
303 	g_create_ch = false;
304 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
305 	CU_ASSERT(g_ut_threads[0].ch == NULL);
306 
307 	g_get_io_channel = true;
308 	g_create_ch = true;
309 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
310 	CU_ASSERT(g_ut_threads[0].ch != NULL);
311 	spdk_put_io_channel(g_ut_threads[0].ch);
312 
313 	g_fini_start_called = false;
314 	teardown_test();
315 	CU_ASSERT(g_fini_start_called == true);
316 }
317 
318 static void
319 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
320 {
321 	bool *done = cb_arg;
322 
323 	CU_ASSERT(success == true);
324 	*done = true;
325 	spdk_bdev_free_io(bdev_io);
326 }
327 
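/*
 * Verify that releasing an I/O channel before the deferred messages for a
 *  reset submitted on it have executed is handled cleanly.
 */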
328 static void
329 put_channel_during_reset(void)
330 {
331 	struct spdk_io_channel *io_ch;
332 	bool done = false;
333 
334 	setup_test();
335 
336 	set_thread(0);
337 	io_ch = spdk_bdev_get_io_channel(g_desc);
338 	CU_ASSERT(io_ch != NULL);
339 
340 	/*
341 	 * Start a reset, but then put the I/O channel before
342 	 *  the deferred messages for the reset get a chance to
343 	 *  execute.
344 	 */
345 	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
346 	spdk_put_io_channel(io_ch);
347 	poll_threads();
348 	stub_complete_io(g_bdev.io_target, 0);
349 
350 	teardown_test();
351 }
352 
353 static void
354 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
355 {
356 	enum spdk_bdev_io_status *status = cb_arg;
357 
358 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
359 	spdk_bdev_free_io(bdev_io);
360 }
361 
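/*
 * Verify that a reset queued behind an in-progress reset is aborted (failed)
 *  when its channel is destroyed, while the original reset still completes
 *  successfully.
 */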
362 static void
363 aborted_reset(void)
364 {
365 	struct spdk_io_channel *io_ch[2];
366 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
367 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
368 
369 	setup_test();
370 
371 	set_thread(0);
372 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
373 	CU_ASSERT(io_ch[0] != NULL);
374 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
375 	poll_threads();
376 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
377 
378 	/*
379 	 * First reset has been submitted on ch0.  Now submit a second
380 	 *  reset on ch1 which will get queued since there is already a
381 	 *  reset in progress.
382 	 */
383 	set_thread(1);
384 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
385 	CU_ASSERT(io_ch[1] != NULL);
386 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
387 	poll_threads();
388 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
389 
390 	/*
391 	 * Now destroy ch1.  This will abort the queued reset.  Check that
392 	 *  the second reset was completed with failed status.  Also check
393 	 *  that bdev->internal.reset_in_progress != NULL, since the
394 	 *  original reset has not been completed yet.  This ensures that
395 	 *  the bdev code is correctly noticing that the failed reset is
396 	 *  *not* the one that had been submitted to the bdev module.
397 	 */
398 	set_thread(1);
399 	spdk_put_io_channel(io_ch[1]);
400 	poll_threads();
401 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
402 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
403 
404 	/*
405 	 * Now complete the first reset, verify that it completed with SUCCESS
406 	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
407 	 */
408 	set_thread(0);
409 	spdk_put_io_channel(io_ch[0]);
410 	stub_complete_io(g_bdev.io_target, 0);
411 	poll_threads();
412 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
413 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
414 
415 	teardown_test();
416 }
417 
418 static void
419 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
420 {
421 	enum spdk_bdev_io_status *status = cb_arg;
422 
423 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
424 	spdk_bdev_free_io(bdev_io);
425 }
426 
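/*
 * Verify that I/O submitted while a reset is in progress is failed by the
 *  bdev layer, and that I/O submitted with no reset pending completes
 *  normally.
 */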
427 static void
428 io_during_reset(void)
429 {
430 	struct spdk_io_channel *io_ch[2];
431 	struct spdk_bdev_channel *bdev_ch[2];
432 	enum spdk_bdev_io_status status0, status1, status_reset;
433 	int rc;
434 
435 	setup_test();
436 
437 	/*
438 	 * First test normal case - submit an I/O on each of two channels (with no resets)
439 	 *  and verify they complete successfully.
440 	 */
441 	set_thread(0);
442 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
443 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
444 	CU_ASSERT(bdev_ch[0]->flags == 0);
445 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
446 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
447 	CU_ASSERT(rc == 0);
448 
449 	set_thread(1);
450 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
451 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
452 	CU_ASSERT(bdev_ch[1]->flags == 0);
453 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
454 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
455 	CU_ASSERT(rc == 0);
456 
457 	poll_threads();
458 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
459 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
460 
461 	set_thread(0);
462 	stub_complete_io(g_bdev.io_target, 0);
463 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
464 
465 	set_thread(1);
466 	stub_complete_io(g_bdev.io_target, 0);
467 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
468 
469 	/*
470 	 * Now submit a reset, and leave it pending while we submit I/O on two different
471 	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
472 	 *  progress.
473 	 */
474 	set_thread(0);
475 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
476 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
477 	CU_ASSERT(rc == 0);
478 
479 	CU_ASSERT(bdev_ch[0]->flags == 0);
480 	CU_ASSERT(bdev_ch[1]->flags == 0);
481 	poll_threads();
482 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
483 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
484 
485 	set_thread(0);
486 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
487 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
488 	CU_ASSERT(rc == 0);
489 
490 	set_thread(1);
491 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
492 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
493 	CU_ASSERT(rc == 0);
494 
495 	/*
496 	 * A reset is in progress so these read I/O should complete with failure.  Note that we
497 	 *  need to poll_threads() since I/O that complete inline have their completions deferred.
498 	 */
499 	poll_threads();
500 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
501 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
502 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);
503 
504 	/*
505 	 * Complete the reset
506 	 */
507 	set_thread(0);
508 	stub_complete_io(g_bdev.io_target, 0);
509 
510 	/*
511 	 * Only poll thread 0. We should not get a completion.
512 	 */
513 	poll_thread(0);
514 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
515 
516 	/*
517 	 * Poll both thread 0 and 1 so the messages can propagate and we
518 	 * get a completion.
519 	 */
520 	poll_threads();
521 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
522 
523 	spdk_put_io_channel(io_ch[0]);
524 	set_thread(1);
525 	spdk_put_io_channel(io_ch[1]);
526 	poll_threads();
527 
528 	teardown_test();
529 }
530 
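/*
 * Verify basic QoS behavior: I/O submitted on any thread is funneled through
 *  the QoS channel (thread 0 here), and after the descriptor is closed and
 *  reopened the QoS channel is re-established on the first channel created.
 */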
531 static void
532 basic_qos(void)
533 {
534 	struct spdk_io_channel *io_ch[2];
535 	struct spdk_bdev_channel *bdev_ch[2];
536 	struct spdk_bdev *bdev;
537 	enum spdk_bdev_io_status status;
538 	int rc;
539 
540 	setup_test();
541 
542 	/* Enable QoS */
543 	bdev = &g_bdev.bdev;
544 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
545 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
546 	TAILQ_INIT(&bdev->internal.qos->queued);
547 	/*
548 	 * Enable both IOPS and bandwidth rate limits.
549 	 * In this case, both rate limits will take equal effect.
550 	 */
551 	bdev->internal.qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
552 	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */
553 
554 	g_get_io_channel = true;
555 
556 	set_thread(0);
557 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
558 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
559 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
560 
561 	set_thread(1);
562 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
563 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
564 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
565 
566 	/*
567 	 * Send an I/O on thread 0, which is where the QoS thread is running.
568 	 */
569 	set_thread(0);
570 	status = SPDK_BDEV_IO_STATUS_PENDING;
571 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
572 	CU_ASSERT(rc == 0);
573 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
574 	poll_threads();
575 	stub_complete_io(g_bdev.io_target, 0);
576 	poll_threads();
577 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
578 
579 	/* Send an I/O on thread 1. The QoS thread is not running here. */
580 	status = SPDK_BDEV_IO_STATUS_PENDING;
581 	set_thread(1);
582 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
583 	CU_ASSERT(rc == 0);
584 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
585 	poll_threads();
586 	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
587 	stub_complete_io(g_bdev.io_target, 0);
588 	poll_threads();
589 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
590 	/* Now complete I/O on thread 0 */
591 	set_thread(0);
592 	poll_threads();
593 	stub_complete_io(g_bdev.io_target, 0);
594 	poll_threads();
595 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
596 
597 	/* Tear down the channels */
598 	set_thread(0);
599 	spdk_put_io_channel(io_ch[0]);
600 	set_thread(1);
601 	spdk_put_io_channel(io_ch[1]);
602 	poll_threads();
603 	set_thread(0);
604 
605 	/* Close the descriptor, which should stop the qos channel */
606 	spdk_bdev_close(g_desc);
607 	poll_threads();
608 	CU_ASSERT(bdev->internal.qos->ch == NULL);
609 
610 	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
611 
612 	/* Create the channels in reverse order. */
613 	set_thread(1);
614 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
615 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
616 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
617 
618 	set_thread(0);
619 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
620 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
621 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
622 
623 	/* Confirm that the qos thread is now thread 1 */
624 	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
625 
626 	/* Tear down the channels */
627 	set_thread(0);
628 	spdk_put_io_channel(io_ch[0]);
629 	set_thread(1);
630 	spdk_put_io_channel(io_ch[1]);
631 	poll_threads();
632 
633 	set_thread(0);
634 
635 	teardown_test();
636 }
637 
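/*
 * Verify that QoS queues I/O that exceeds the per-timeslice allotment and
 *  submits it once time advances into the next timeslice.
 */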
638 static void
639 io_during_qos_queue(void)
640 {
641 	struct spdk_io_channel *io_ch[2];
642 	struct spdk_bdev_channel *bdev_ch[2];
643 	struct spdk_bdev *bdev;
644 	enum spdk_bdev_io_status status0, status1;
645 	int rc;
646 
647 	setup_test();
648 	reset_time();
649 
650 	/* Enable QoS */
651 	bdev = &g_bdev.bdev;
652 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
653 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
654 	TAILQ_INIT(&bdev->internal.qos->queued);
655 	/*
656 	 * Enable both IOPS and bandwidth rate limits.
657 	 * In this case, IOPS rate limit will take effect first.
658 	 */
659 	bdev->internal.qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
660 	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */
661 
662 	g_get_io_channel = true;
663 
664 	/* Create channels */
665 	set_thread(0);
666 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
667 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
668 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
669 
670 	set_thread(1);
671 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
672 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
673 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
674 
675 	/* Send two I/O */
676 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
677 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
678 	CU_ASSERT(rc == 0);
679 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
680 	set_thread(0);
681 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
682 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
683 	CU_ASSERT(rc == 0);
684 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
685 
686 	/* Complete any I/O that arrived at the disk */
687 	poll_threads();
688 	set_thread(1);
689 	stub_complete_io(g_bdev.io_target, 0);
690 	set_thread(0);
691 	stub_complete_io(g_bdev.io_target, 0);
692 	poll_threads();
693 
694 	/* Only one of the I/O should complete. (logical XOR) */
695 	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
696 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
697 	} else {
698 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
699 	}
700 
701 	/* Advance in time by a millisecond */
702 	increment_time(1000);
703 
704 	/* Complete more I/O */
705 	poll_threads();
706 	set_thread(1);
707 	stub_complete_io(g_bdev.io_target, 0);
708 	set_thread(0);
709 	stub_complete_io(g_bdev.io_target, 0);
710 	poll_threads();
711 
712 	/* Now the second I/O should be done */
713 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
714 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
715 
716 	/* Tear down the channels */
717 	set_thread(1);
718 	spdk_put_io_channel(io_ch[1]);
719 	set_thread(0);
720 	spdk_put_io_channel(io_ch[0]);
721 	poll_threads();
722 
723 	teardown_test();
724 }
725 
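/*
 * Verify that a reset fails both the I/O queued by QoS and the I/O already
 *  sitting at the disk, while the reset itself completes successfully.
 */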
726 static void
727 io_during_qos_reset(void)
728 {
729 	struct spdk_io_channel *io_ch[2];
730 	struct spdk_bdev_channel *bdev_ch[2];
731 	struct spdk_bdev *bdev;
732 	enum spdk_bdev_io_status status0, status1, reset_status;
733 	int rc;
734 
735 	setup_test();
736 	reset_time();
737 
738 	/* Enable QoS */
739 	bdev = &g_bdev.bdev;
740 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
741 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
742 	TAILQ_INIT(&bdev->internal.qos->queued);
743 	/*
744 	 * Enable both IOPS and bandwidth rate limits.
745 	 * In this case, bandwidth rate limit will take effect first.
746 	 */
747 	bdev->internal.qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
748 	bdev->internal.qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */
749 
750 	g_get_io_channel = true;
751 
752 	/* Create channels */
753 	set_thread(0);
754 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
755 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
756 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
757 
758 	set_thread(1);
759 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
760 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
761 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
762 
763 	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
764 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
765 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
766 	CU_ASSERT(rc == 0);
767 	set_thread(0);
768 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
769 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
770 	CU_ASSERT(rc == 0);
771 
772 	poll_threads();
773 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
774 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
775 
776 	/* Reset the bdev. */
777 	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
778 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
779 	CU_ASSERT(rc == 0);
780 
781 	/* Complete any I/O that arrived at the disk */
782 	poll_threads();
783 	set_thread(1);
784 	stub_complete_io(g_bdev.io_target, 0);
785 	set_thread(0);
786 	stub_complete_io(g_bdev.io_target, 0);
787 	poll_threads();
788 
789 	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
790 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
791 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);
792 
793 	/* Tear down the channels */
794 	set_thread(1);
795 	spdk_put_io_channel(io_ch[1]);
796 	set_thread(0);
797 	spdk_put_io_channel(io_ch[0]);
798 	poll_threads();
799 
800 	teardown_test();
801 }
802 
803 static void
804 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
805 {
806 	enum spdk_bdev_io_status *status = cb_arg;
807 
808 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
809 	spdk_bdev_free_io(bdev_io);
810 }
811 
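/*
 * Verify ENOMEM handling: I/O completed by the module with NOMEM status is
 *  queued on the shared nomem_io list and only retried once enough
 *  completions occur to reach the nomem_threshold, and a reset completes
 *  everything, including the queued I/O.
 */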
812 static void
813 enomem(void)
814 {
815 	struct spdk_io_channel *io_ch;
816 	struct spdk_bdev_channel *bdev_ch;
817 	struct spdk_bdev_shared_resource *shared_resource;
818 	struct ut_bdev_channel *ut_ch;
819 	const uint32_t IO_ARRAY_SIZE = 64;
820 	const uint32_t AVAIL = 20;
821 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
822 	uint32_t nomem_cnt, i;
823 	struct spdk_bdev_io *first_io;
824 	int rc;
825 
826 	setup_test();
827 
828 	set_thread(0);
829 	io_ch = spdk_bdev_get_io_channel(g_desc);
830 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
831 	shared_resource = bdev_ch->shared_resource;
832 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
833 	ut_ch->avail_cnt = AVAIL;
834 
835 	/* First submit a number of IOs equal to what the channel can support. */
836 	for (i = 0; i < AVAIL; i++) {
837 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
838 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
839 		CU_ASSERT(rc == 0);
840 	}
841 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
842 
843 	/*
844 	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
845 	 *  the enomem_io list.
846 	 */
847 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
848 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
849 	CU_ASSERT(rc == 0);
850 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
851 	first_io = TAILQ_FIRST(&shared_resource->nomem_io);
852 
853 	/*
854 	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
855 	 *  the first_io above.
856 	 */
857 	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
858 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
859 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
860 		CU_ASSERT(rc == 0);
861 	}
862 
863 	/* Assert that first_io is still at the head of the list. */
864 	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
865 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
866 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
867 	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
868 
869 	/*
870 	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
871 	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
872 	 *  list.
873 	 */
874 	stub_complete_io(g_bdev.io_target, 1);
875 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
876 
877 	/*
878 	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
879 	 *  and we should see I/O get resubmitted to the test bdev module.
880 	 */
881 	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
882 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
883 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
884 
885 	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
886 	stub_complete_io(g_bdev.io_target, 1);
887 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
888 
889 	/*
890 	 * Send a reset and confirm that all I/O are completed, including the ones that
891 	 *  were queued on the nomem_io list.
892 	 */
893 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
894 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
895 	poll_threads();
896 	CU_ASSERT(rc == 0);
897 	/* This will complete the reset. */
898 	stub_complete_io(g_bdev.io_target, 0);
899 
900 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
901 	CU_ASSERT(shared_resource->io_outstanding == 0);
902 
903 	spdk_put_io_channel(io_ch);
904 	poll_threads();
905 	teardown_test();
906 }
907 
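/*
 * Verify that two bdevs sharing one io_target also share the nomem_io queue,
 *  so completions on the first bdev retry I/O queued by the second.
 */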
908 static void
909 enomem_multi_bdev(void)
910 {
911 	struct spdk_io_channel *io_ch;
912 	struct spdk_bdev_channel *bdev_ch;
913 	struct spdk_bdev_shared_resource *shared_resource;
914 	struct ut_bdev_channel *ut_ch;
915 	const uint32_t IO_ARRAY_SIZE = 64;
916 	const uint32_t AVAIL = 20;
917 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
918 	uint32_t i;
919 	struct ut_bdev *second_bdev;
920 	struct spdk_bdev_desc *second_desc;
921 	struct spdk_bdev_channel *second_bdev_ch;
922 	struct spdk_io_channel *second_ch;
923 	int rc;
924 
925 	setup_test();
926 
927 	/* Register second bdev with the same io_target  */
928 	second_bdev = calloc(1, sizeof(*second_bdev));
929 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
930 	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
931 	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
932 
933 	set_thread(0);
934 	io_ch = spdk_bdev_get_io_channel(g_desc);
935 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
936 	shared_resource = bdev_ch->shared_resource;
937 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
938 	ut_ch->avail_cnt = AVAIL;
939 
940 	second_ch = spdk_bdev_get_io_channel(second_desc);
941 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
942 	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
943 
944 	/* Saturate io_target through bdev A. */
945 	for (i = 0; i < AVAIL; i++) {
946 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
947 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
948 		CU_ASSERT(rc == 0);
949 	}
950 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
951 
952 	/*
953 	 * Now submit I/O through the second bdev. This should fail with ENOMEM
954 	 * and then go onto the nomem_io list.
955 	 */
956 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
957 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
958 	CU_ASSERT(rc == 0);
959 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
960 
961 	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
962 	stub_complete_io(g_bdev.io_target, AVAIL);
963 
964 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
965 	CU_ASSERT(shared_resource->io_outstanding == 1);
966 
967 	/* Now complete our retried I/O  */
968 	stub_complete_io(g_bdev.io_target, 1);
969 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
970 
971 	spdk_put_io_channel(io_ch);
972 	spdk_put_io_channel(second_ch);
973 	spdk_bdev_close(second_desc);
974 	unregister_bdev(second_bdev);
975 	poll_threads();
976 	free(second_bdev);
977 	teardown_test();
978 }
979 
980 
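/*
 * Verify that bdevs built on different io_targets get separate shared
 *  resources, so ENOMEM on one target does not block I/O to the other.
 */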
981 static void
982 enomem_multi_io_target(void)
983 {
984 	struct spdk_io_channel *io_ch;
985 	struct spdk_bdev_channel *bdev_ch;
986 	struct ut_bdev_channel *ut_ch;
987 	const uint32_t IO_ARRAY_SIZE = 64;
988 	const uint32_t AVAIL = 20;
989 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
990 	uint32_t i;
991 	int new_io_device;
992 	struct ut_bdev *second_bdev;
993 	struct spdk_bdev_desc *second_desc;
994 	struct spdk_bdev_channel *second_bdev_ch;
995 	struct spdk_io_channel *second_ch;
996 	int rc;
997 
998 	setup_test();
999 
1000 	/* Create new io_target and a second bdev using it */
1001 	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1002 				sizeof(struct ut_bdev_channel));
1003 	second_bdev = calloc(1, sizeof(*second_bdev));
1004 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1005 	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
1006 	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
1007 
1008 	set_thread(0);
1009 	io_ch = spdk_bdev_get_io_channel(g_desc);
1010 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1011 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1012 	ut_ch->avail_cnt = AVAIL;
1013 
1014 	/* Different io_target should imply a different shared_resource */
1015 	second_ch = spdk_bdev_get_io_channel(second_desc);
1016 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1017 	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1018 
1019 	/* Saturate io_target through bdev A. */
1020 	for (i = 0; i < AVAIL; i++) {
1021 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1022 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1023 		CU_ASSERT(rc == 0);
1024 	}
1025 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1026 
1027 	/* Issue one more I/O, which should be queued on the nomem_io list. */
1028 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1029 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1030 	CU_ASSERT(rc == 0);
1031 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1032 
1033 	/*
1034 	 * Now submit I/O through the second bdev. This should go through and complete
1035 	 * successfully because we're using a different io_device underneath.
1036 	 */
1037 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1038 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1039 	CU_ASSERT(rc == 0);
1040 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1041 	stub_complete_io(second_bdev->io_target, 1);
1042 
1043 	/* Cleanup; Complete outstanding I/O. */
1044 	stub_complete_io(g_bdev.io_target, AVAIL);
1045 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1046 	/* Complete the ENOMEM I/O */
1047 	stub_complete_io(g_bdev.io_target, 1);
1048 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1049 
1050 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1051 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1052 	spdk_put_io_channel(io_ch);
1053 	spdk_put_io_channel(second_ch);
1054 	spdk_bdev_close(second_desc);
1055 	unregister_bdev(second_bdev);
1056 	spdk_io_device_unregister(&new_io_device, NULL);
1057 	poll_threads();
1058 	free(second_bdev);
1059 	teardown_test();
1060 }
1061 
1062 static void
1063 qos_dynamic_enable_done(void *cb_arg, int status)
1064 {
1065 	int *rc = cb_arg;
1066 	*rc = status;
1067 }
1068 
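/*
 * Verify enabling and disabling QoS at runtime: queued I/O is returned to its
 *  original thread (not aborted) when QoS is disabled, and an enable request
 *  fails while a disable is still in flight.
 */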
1069 static void
1070 qos_dynamic_enable(void)
1071 {
1072 	struct spdk_io_channel *io_ch[2];
1073 	struct spdk_bdev_channel *bdev_ch[2];
1074 	struct spdk_bdev *bdev;
1075 	enum spdk_bdev_io_status bdev_io_status[2];
1076 	int status, second_status, rc, i;
1077 
1078 	setup_test();
1079 	reset_time();
1080 
1081 	bdev = &g_bdev.bdev;
1082 
1083 	g_get_io_channel = true;
1084 
1085 	/* Create channels */
1086 	set_thread(0);
1087 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1088 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1089 	CU_ASSERT(bdev_ch[0]->flags == 0);
1090 
1091 	set_thread(1);
1092 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1093 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1094 	CU_ASSERT(bdev_ch[1]->flags == 0);
1095 
1096 	set_thread(0);
1097 
1098 	/* Enable QoS */
1099 	status = -1;
1100 	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
1101 	poll_threads();
1102 	CU_ASSERT(status == 0);
1103 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1104 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1105 
1106 	/*
1107 	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
1108 	 * Additional I/O will then be queued.
1109 	 */
1110 	set_thread(0);
1111 	for (i = 0; i < 10; i++) {
1112 		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1113 		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1114 		CU_ASSERT(rc == 0);
1115 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1116 		poll_thread(0);
1117 		stub_complete_io(g_bdev.io_target, 0);
1118 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1119 	}
1120 
1121 	/*
1122 	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
1123 	 * filled already.  We want to test that when QoS is disabled, these two I/O:
1124 	 *  1) are not aborted
1125 	 *  2) are sent back to their original thread for resubmission
1126 	 */
1127 	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1128 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1129 	CU_ASSERT(rc == 0);
1130 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1131 	set_thread(1);
1132 	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
1133 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
1134 	CU_ASSERT(rc == 0);
1135 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1136 	poll_threads();
1137 
1138 	/* Disable QoS */
1139 	status = -1;
1140 	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
1141 	poll_threads();
1142 	CU_ASSERT(status == 0);
1143 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1144 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1145 
1146 	/*
1147 	 * All I/O should have been resubmitted back on their original thread.  Complete
1148 	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
1149 	 */
1150 	set_thread(0);
1151 	stub_complete_io(g_bdev.io_target, 0);
1152 	poll_threads();
1153 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1154 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1155 
1156 	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
1157 	set_thread(1);
1158 	stub_complete_io(g_bdev.io_target, 0);
1159 	poll_threads();
1160 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
1161 
1162 	/* Disable QoS again */
1163 	status = -1;
1164 	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
1165 	poll_threads();
1166 	CU_ASSERT(status == 0); /* This should succeed */
1167 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1168 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1169 
1170 	/* Enable QoS again */
1171 	status = -1;
1172 	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
1173 	poll_threads();
1174 	CU_ASSERT(status == 0);
1175 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1176 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1177 
1178 	/* Disable QoS on thread 1 */
1179 	set_thread(1);
1180 	status = -1;
1181 	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
1182 	/* Don't poll yet. This should leave the channels with QoS enabled */
1183 	CU_ASSERT(status == -1);
1184 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1185 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1186 
1187 	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
1188 	second_status = 0;
1189 	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &second_status);
1190 	poll_threads();
1191 	CU_ASSERT(status == 0); /* The disable should succeed */
1192 	CU_ASSERT(second_status < 0); /* The enable should fail */
1193 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1194 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1195 
1196 	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
1197 	status = -1;
1198 	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
1199 	poll_threads();
1200 	CU_ASSERT(status == 0);
1201 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1202 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1203 
1204 	/* Tear down the channels */
1205 	set_thread(0);
1206 	spdk_put_io_channel(io_ch[0]);
1207 	set_thread(1);
1208 	spdk_put_io_channel(io_ch[1]);
1209 	poll_threads();
1210 
1211 	set_thread(0);
1212 	teardown_test();
1213 }
1214 
1215 int
1216 main(int argc, char **argv)
1217 {
1218 	CU_pSuite	suite = NULL;
1219 	unsigned int	num_failures;
1220 
1221 	if (CU_initialize_registry() != CUE_SUCCESS) {
1222 		return CU_get_error();
1223 	}
1224 
1225 	suite = CU_add_suite("bdev", NULL, NULL);
1226 	if (suite == NULL) {
1227 		CU_cleanup_registry();
1228 		return CU_get_error();
1229 	}
1230 
1231 	if (
1232 		CU_add_test(suite, "basic", basic) == NULL ||
1233 		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
1234 		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
1235 		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
1236 		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
1237 		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
1238 		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
1239 		CU_add_test(suite, "enomem", enomem) == NULL ||
1240 		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
1241 		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
1242 		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
1243 	) {
1244 		CU_cleanup_registry();
1245 		return CU_get_error();
1246 	}
1247 
1248 	CU_basic_set_mode(CU_BRM_VERBOSE);
1249 	CU_basic_run_tests();
1250 	num_failures = CU_get_number_of_failures();
1251 	CU_cleanup_registry();
1252 	return num_failures;
1253 }
1254