/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
		const char *name), NULL);
DEFINE_STUB(spdk_conf_section_get_nmval, char *,
	    (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
		uint16_t tpoint_id, uint8_t owner_type,
		uint8_t object_type, uint8_t new_object,
		uint8_t arg1_is_ptr, uint8_t arg1_is_alias,
		const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;
bool g_fini_start_called = true;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt reaches 0, the stub's submit_request function completes
	 *  new I/O with SPDK_BDEV_IO_STATUS_NOMEM.  Most tests do not want ENOMEM
	 *  to occur, so by default set this to a big value that won't get hit.
	 *  The ENOMEM tests can then override this value with something much
	 *  smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

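/*
 * Stub submit_request: a RESET I/O first fails every I/O currently queued on
 *  the stub channel.  Any other I/O is queued on outstanding_io while
 *  avail_cnt permits; otherwise it is completed with SPDK_BDEV_IO_STATUS_NOMEM.
 */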
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

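/*
 * Complete up to num_to_complete I/O queued on the stub channel for io_target
 *  with SUCCESS status.  Passing 0 completes everything that is outstanding.
 */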
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

static void
fini_start(void)
{
	g_fini_start_called = true;
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.init_complete = init_complete,
	.fini_start = fini_start,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

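/* Initialize a ut_bdev on top of the given io_target and register it with the bdev layer. */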
static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

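/*
 * Common test setup: allocate the unit test threads, initialize the bdev layer,
 *  register the stub io_device and "ut_bdev", and open a descriptor to it.
 */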
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, internal.link) {
		cnt++;
	}

	return cnt;
}

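/*
 * Verify the init_complete/fini_start module callbacks and the basic I/O channel
 *  get/put path, including the failure cases where the module returns no channel
 *  or channel creation fails.
 */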
static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	g_fini_start_called = false;
	teardown_test();
	CU_ASSERT(g_fini_start_called == true);
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

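/*
 * Verify that releasing an I/O channel while a reset submitted on it is still
 *  pending is handled cleanly.
 */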
static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

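/*
 * Verify that a reset queued behind an in-progress reset is failed when its
 *  channel is destroyed, while the original reset still completes successfully.
 */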
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
				 status2 = SPDK_BDEV_IO_STATUS_PENDING;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->internal.reset_in_progress != NULL, since the
	 *  original reset has not been completed yet.  This ensures that
	 *  the bdev code is correctly noticing that the failed reset is
	 *  *not* the one that had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

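/*
 * Verify that reads submitted with no reset pending complete successfully, and
 *  that reads submitted while a reset is in progress are failed by the bdev
 *  layer without reaching the module.
 */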
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress, so these read I/O should complete with failure.  Note that
	 *  we need to poll_threads() since I/O that are completed inline have their
	 *  completions deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

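/*
 * Verify QoS-enabled channels: I/O submitted on any thread is processed by the
 *  QoS thread, and the QoS thread is reassigned when the descriptor is reopened
 *  and the channels are recreated in a different order.
 */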
static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, both rate limits will take equal effect.
	 */
	bdev->internal.qos->iops_rate_limit = 2000; /* 2 I/O per millisecond */
	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/*
	 * Send an I/O on thread 0, which is where the QoS thread is running.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Send an I/O on thread 1. The QoS thread is not running here. */
	status = SPDK_BDEV_IO_STATUS_PENDING;
	set_thread(1);
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();
	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
	/* Now complete I/O on thread 0 */
	set_thread(0);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	set_thread(0);

	/* Close the descriptor, which should stop the qos channel */
	spdk_bdev_close(g_desc);
	poll_threads();
	CU_ASSERT(bdev->internal.qos->ch == NULL);

	spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);

	/* Create the channels in reverse order. */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	/* Confirm that the qos thread is now thread 1 */
	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);

	teardown_test();
}

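/*
 * Verify that when the IOPS limit is exhausted for the current timeslice, the
 *  excess I/O stays queued by QoS and only completes after time advances into
 *  the next timeslice.
 */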
static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, IOPS rate limit will take effect first.
	 */
	bdev->internal.qos->iops_rate_limit = 1000; /* 1000 I/O per second, or 1 per millisecond */
	bdev->internal.qos->byte_rate_limit = 8192000; /* 8K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Only one of the I/O should complete. (logical XOR) */
	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	} else {
		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/* Advance in time by a millisecond */
	increment_time(1000);

	/* Complete more I/O */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	/* Now the second I/O should be done */
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

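/*
 * Verify that a reset on a QoS-enabled bdev fails the I/O held back by the
 *  bandwidth limit as well as the I/O already sitting at the disk.
 */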
static void
io_during_qos_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1, reset_status;
	int rc;

	setup_test();
	reset_time();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	TAILQ_INIT(&bdev->internal.qos->queued);
	/*
	 * Enable both IOPS and bandwidth rate limits.
	 * In this case, bandwidth rate limit will take effect first.
	 */
	bdev->internal.qos->iops_rate_limit = 2000; /* 2000 I/O per second, or 2 per millisecond */
	bdev->internal.qos->byte_rate_limit = 4096000; /* 4K byte per millisecond with 4K block size */

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);

	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/* Reset the bdev. */
	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
	CU_ASSERT(rc == 0);

	/* Complete any I/O that arrived at the disk */
	poll_threads();
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/* Tear down the channels */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	poll_threads();

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

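/*
 * Verify NOMEM handling: I/O that the module completes with NOMEM is queued on
 *  the shared resource's nomem_io list and is only retried once enough
 *  completions bring the outstanding count down to nomem_threshold.  A reset
 *  flushes everything, including the queued nomem_io.
 */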
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
	first_io = TAILQ_FIRST(&shared_resource->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
	CU_ASSERT(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

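/*
 * Verify that two bdevs built on the same io_target share a single
 *  shared_resource, so NOMEM I/O queued through one bdev is retried when the
 *  other bdev's I/O completes.
 */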
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_shared_resource *shared_resource;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register second bdev with the same io_target */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	shared_resource = bdev_ch->shared_resource;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));

	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
	CU_ASSERT(shared_resource->io_outstanding == 1);

	/* Now complete our retried I/O */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

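/*
 * Verify that bdevs built on different io_targets get different shared_resources,
 *  so NOMEM conditions on one target do not block I/O submitted to the other.
 */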
static void
enomem_multi_io_target(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	int new_io_device;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc = NULL;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Create new io_target and a second bdev using it */
	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
	SPDK_CU_ASSERT_FATAL(second_desc != NULL);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* Different io_target should imply a different shared_resource */
	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/* Issue one more I/O to fill ENOMEM list. */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));

	/*
	 * Now submit I/O through the second bdev. This should go through and complete
	 * successfully because we're using a different io_device underneath.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
	stub_complete_io(second_bdev->io_target, 1);

	/* Cleanup; Complete outstanding I/O. */
	stub_complete_io(g_bdev.io_target, AVAIL);
	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	/* Complete the ENOMEM I/O */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	spdk_io_device_unregister(&new_io_device, NULL);
	poll_threads();
	free(second_bdev);
	teardown_test();
}

static void
qos_dynamic_enable_done(void *cb_arg, int status)
{
	int *rc = cb_arg;
	*rc = status;
}

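/*
 * Verify enabling and disabling QoS at runtime: I/O queued by QoS survives a
 *  disable and is resubmitted on its original thread, and an enable issued
 *  while a disable is still in flight fails.
 */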
static void
qos_dynamic_enable(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status bdev_io_status[2];
	int status, second_status, rc, i;

	setup_test();
	reset_time();

	bdev = &g_bdev.bdev;

	g_get_io_channel = true;

	/* Create channels */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	set_thread(0);

	/* Enable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/*
	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
	 * Additional I/O will then be queued.
	 */
	set_thread(0);
	for (i = 0; i < 10; i++) {
		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
		CU_ASSERT(rc == 0);
		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
		poll_thread(0);
		stub_complete_io(g_bdev.io_target, 0);
		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
	}

	/*
	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
	 * filled already.  We want to test that when QoS is disabled, these two I/O:
	 *  1) are not aborted
	 *  2) are sent back to their original thread for resubmission
	 */
	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
	set_thread(1);
	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
	poll_threads();

	/* Disable QoS */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/*
	 * All I/O should have been resubmitted back on their original thread.  Complete
	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);

	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Disable QoS again */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0); /* This should succeed */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 0 */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Disable QoS on thread 1 */
	set_thread(1);
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 0, qos_dynamic_enable_done, &status);
	/* Don't poll yet. This should leave the channels with QoS enabled */
	CU_ASSERT(status == -1);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
	second_status = 0;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &second_status);
	poll_threads();
	CU_ASSERT(status == 0); /* The disable should succeed */
	CU_ASSERT(second_status < 0); /* The enable should fail */
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);

	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
	status = -1;
	spdk_bdev_set_qos_limit_iops(bdev, 10000, qos_dynamic_enable_done, &status);
	poll_threads();
	CU_ASSERT(status == 0);
	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);

	/* Tear down the channels */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	set_thread(0);
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
		CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
		CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}