xref: /spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c (revision 5fd9561f54daa8eff7f3bcb56c789655bca846b1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk_cunit.h"
35 
36 #include "common/lib/ut_multithread.c"
37 #include "unit/lib/json_mock.c"
38 
39 #include "spdk/config.h"
40 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
41 #undef SPDK_CONFIG_VTUNE
42 
43 #include "bdev/bdev.c"
44 
45 #define BDEV_UT_NUM_THREADS 3
46 
47 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
48 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
49 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk,
50 		int *asc, int *ascq));
51 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
52 	    "test_domain");
53 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
54 	    (struct spdk_memory_domain *domain), 0);
55 
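/*
 * Memory domain pull/push stubs: unless a return value has been mocked, they invoke
 * the completion callback immediately with status 0 and return success.
 */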
56 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
57 int
58 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
59 			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
60 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
61 {
62 	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
63 
64 	cpl_cb(cpl_cb_arg, 0);
65 	return 0;
66 }
67 
68 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
69 int
70 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
71 			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
72 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
73 {
74 	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
75 
76 	cpl_cb(cpl_cb_arg, 0);
77 	return 0;
78 }
79 
80 struct ut_bdev {
81 	struct spdk_bdev	bdev;
82 	void			*io_target;
83 };
84 
85 struct ut_bdev_channel {
86 	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
87 	uint32_t			outstanding_cnt;
88 	uint32_t			avail_cnt;
89 };
90 
91 int g_io_device;
92 struct ut_bdev g_bdev;
93 struct spdk_bdev_desc *g_desc;
94 bool g_teardown_done = false;
95 bool g_get_io_channel = true;
96 bool g_create_ch = true;
97 bool g_init_complete_called = false;
98 bool g_fini_start_called = true;
99 int g_status = 0;
100 int g_count = 0;
101 struct spdk_histogram_data *g_histogram = NULL;
102 
103 static int
104 stub_create_ch(void *io_device, void *ctx_buf)
105 {
106 	struct ut_bdev_channel *ch = ctx_buf;
107 
108 	if (g_create_ch == false) {
109 		return -1;
110 	}
111 
112 	TAILQ_INIT(&ch->outstanding_io);
113 	ch->outstanding_cnt = 0;
114 	/*
115 	 * When avail_cnt reaches 0, the submit_request function completes I/O with
116 	 *  SPDK_BDEV_IO_STATUS_NOMEM.  Most tests do not want ENOMEM to occur, so by
117 	 *  default set this to a big value that won't get hit.  The ENOMEM tests can
118 	 *  then override this value to something much smaller to induce ENOMEM conditions.
119 	 */
120 	ch->avail_cnt = 2048;
121 	return 0;
122 }
123 
124 static void
125 stub_destroy_ch(void *io_device, void *ctx_buf)
126 {
127 }
128 
129 static struct spdk_io_channel *
130 stub_get_io_channel(void *ctx)
131 {
132 	struct ut_bdev *ut_bdev = ctx;
133 
134 	if (g_get_io_channel == true) {
135 		return spdk_get_io_channel(ut_bdev->io_target);
136 	} else {
137 		return NULL;
138 	}
139 }
140 
141 static int
142 stub_destruct(void *ctx)
143 {
144 	return 0;
145 }
146 
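/*
 * Stub request handler for the test io_target.  RESET aborts every queued I/O on the
 * channel; ABORT looks up bio_to_abort, aborts it and completes the abort with success
 * (or fails the abort if the target I/O is not found).  All other I/O is queued on
 * outstanding_io until stub_complete_io() is called, or completed with NOMEM status
 * once avail_cnt is exhausted.
 */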
147 static void
148 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
149 {
150 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
151 	struct spdk_bdev_io *io;
152 
153 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
154 		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
155 			io = TAILQ_FIRST(&ch->outstanding_io);
156 			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
157 			ch->outstanding_cnt--;
158 			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
159 			ch->avail_cnt++;
160 		}
161 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
162 		TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
163 			if (io == bdev_io->u.abort.bio_to_abort) {
164 				TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
165 				ch->outstanding_cnt--;
166 				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
167 				ch->avail_cnt++;
168 
169 				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
170 				return;
171 			}
172 		}
173 
174 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
175 		return;
176 	}
177 
178 	if (ch->avail_cnt > 0) {
179 		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
180 		ch->outstanding_cnt++;
181 		ch->avail_cnt--;
182 	} else {
183 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
184 	}
185 }
186 
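/*
 * Complete queued I/O on the current thread's channel for the given io_target with
 * SUCCESS status.  num_to_complete == 0 means "complete everything"; returns the
 * number of I/O actually completed.
 */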
187 static uint32_t
188 stub_complete_io(void *io_target, uint32_t num_to_complete)
189 {
190 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
191 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
192 	struct spdk_bdev_io *io;
193 	bool complete_all = (num_to_complete == 0);
194 	uint32_t num_completed = 0;
195 
196 	while (complete_all || num_completed < num_to_complete) {
197 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
198 			break;
199 		}
200 		io = TAILQ_FIRST(&ch->outstanding_io);
201 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
202 		ch->outstanding_cnt--;
203 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
204 		ch->avail_cnt++;
205 		num_completed++;
206 	}
207 	spdk_put_io_channel(_ch);
208 	return num_completed;
209 }
210 
211 static bool
212 stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
213 {
214 	return true;
215 }
216 
217 static struct spdk_bdev_fn_table fn_table = {
218 	.get_io_channel =	stub_get_io_channel,
219 	.destruct =		stub_destruct,
220 	.submit_request =	stub_submit_request,
221 	.io_type_supported =	stub_io_type_supported,
222 };
223 
224 struct spdk_bdev_module bdev_ut_if;
225 
226 static int
227 module_init(void)
228 {
229 	spdk_bdev_module_init_done(&bdev_ut_if);
230 	return 0;
231 }
232 
233 static void
234 module_fini(void)
235 {
236 }
237 
238 static void
239 init_complete(void)
240 {
241 	g_init_complete_called = true;
242 }
243 
244 static void
245 fini_start(void)
246 {
247 	g_fini_start_called = true;
248 }
249 
250 struct spdk_bdev_module bdev_ut_if = {
251 	.name = "bdev_ut",
252 	.module_init = module_init,
253 	.module_fini = module_fini,
254 	.async_init = true,
255 	.init_complete = init_complete,
256 	.fini_start = fini_start,
257 };
258 
259 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
260 
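/* Initialize a ut_bdev (4096-byte blocks, 1024 blocks) backed by the stub fn_table and
 * register it against the given io_target. */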
261 static void
262 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
263 {
264 	memset(ut_bdev, 0, sizeof(*ut_bdev));
265 
266 	ut_bdev->io_target = io_target;
267 	ut_bdev->bdev.ctxt = ut_bdev;
268 	ut_bdev->bdev.name = name;
269 	ut_bdev->bdev.fn_table = &fn_table;
270 	ut_bdev->bdev.module = &bdev_ut_if;
271 	ut_bdev->bdev.blocklen = 4096;
272 	ut_bdev->bdev.blockcnt = 1024;
273 
274 	spdk_bdev_register(&ut_bdev->bdev);
275 }
276 
277 static void
278 unregister_bdev(struct ut_bdev *ut_bdev)
279 {
280 	/* Handle any deferred messages. */
281 	poll_threads();
282 	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
283 }
284 
285 static void
286 bdev_init_cb(void *done, int rc)
287 {
288 	CU_ASSERT(rc == 0);
289 	*(bool *)done = true;
290 }
291 
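/* Descriptor event callback: on SPDK_BDEV_EVENT_REMOVE, set the bool passed as
 * event_ctx (if any); any other event type fails the test. */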
292 static void
293 _bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
294 	       void *event_ctx)
295 {
296 	switch (type) {
297 	case SPDK_BDEV_EVENT_REMOVE:
298 		if (event_ctx != NULL) {
299 			*(bool *)event_ctx = true;
300 		}
301 		break;
302 	default:
303 		CU_ASSERT(false);
304 		break;
305 	}
306 }
307 
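/* Allocate the test threads, initialize the bdev layer, register the stub io_device
 * and ut_bdev, and open a descriptor into g_desc. */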
308 static void
309 setup_test(void)
310 {
311 	bool done = false;
312 
313 	allocate_cores(BDEV_UT_NUM_THREADS);
314 	allocate_threads(BDEV_UT_NUM_THREADS);
315 	set_thread(0);
316 	spdk_bdev_initialize(bdev_init_cb, &done);
317 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
318 				sizeof(struct ut_bdev_channel), NULL);
319 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
320 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
321 }
322 
323 static void
324 finish_cb(void *cb_arg)
325 {
326 	g_teardown_done = true;
327 }
328 
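/* Undo setup_test(): close the descriptor, unregister the bdev and io_device, finish
 * the bdev layer, and free the test threads and cores. */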
329 static void
330 teardown_test(void)
331 {
332 	set_thread(0);
333 	g_teardown_done = false;
334 	spdk_bdev_close(g_desc);
335 	g_desc = NULL;
336 	unregister_bdev(&g_bdev);
337 	spdk_io_device_unregister(&g_io_device, NULL);
338 	spdk_bdev_finish(finish_cb, NULL);
339 	poll_threads();
340 	memset(&g_bdev, 0, sizeof(g_bdev));
341 	CU_ASSERT(g_teardown_done == true);
342 	g_teardown_done = false;
343 	free_threads();
344 	free_cores();
345 }
346 
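/* Count the entries on a bdev_io tailq (used to inspect the nomem_io queue). */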
347 static uint32_t
348 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
349 {
350 	struct spdk_bdev_io *io;
351 	uint32_t cnt = 0;
352 
353 	TAILQ_FOREACH(io, tailq, internal.link) {
354 		cnt++;
355 	}
356 
357 	return cnt;
358 }
359 
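/* Verify the async module init/fini callbacks and the I/O channel creation failure
 * paths (get_io_channel returning NULL and the channel create_cb failing). */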
360 static void
361 basic(void)
362 {
363 	g_init_complete_called = false;
364 	setup_test();
365 	CU_ASSERT(g_init_complete_called == true);
366 
367 	set_thread(0);
368 
369 	g_get_io_channel = false;
370 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
371 	CU_ASSERT(g_ut_threads[0].ch == NULL);
372 
373 	g_get_io_channel = true;
374 	g_create_ch = false;
375 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
376 	CU_ASSERT(g_ut_threads[0].ch == NULL);
377 
378 	g_get_io_channel = true;
379 	g_create_ch = true;
380 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
381 	CU_ASSERT(g_ut_threads[0].ch != NULL);
382 	spdk_put_io_channel(g_ut_threads[0].ch);
383 
384 	g_fini_start_called = false;
385 	teardown_test();
386 	CU_ASSERT(g_fini_start_called == true);
387 }
388 
389 static void
390 _bdev_unregistered(void *done, int rc)
391 {
392 	CU_ASSERT(rc == 0);
393 	*(bool *)done = true;
394 }
395 
396 static void
397 unregister_and_close(void)
398 {
399 	bool done, remove_notify;
400 	struct spdk_bdev_desc *desc = NULL;
401 
402 	setup_test();
403 	set_thread(0);
404 
405 	/* setup_test() automatically opens the bdev,
406 	 * but this test needs to do that in a different
407 	 * way. */
408 	spdk_bdev_close(g_desc);
409 	poll_threads();
410 
411 	/* Try hotremoving a bdev with descriptors which don't provide
412 	 * any context to the notification callback */
413 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
414 	SPDK_CU_ASSERT_FATAL(desc != NULL);
415 
416 	/* There is an open descriptor on the device. Unregister it,
417 	 * which can't proceed until the descriptor is closed. */
418 	done = false;
419 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
420 
421 	/* Poll the threads to allow all events to be processed */
422 	poll_threads();
423 
424 	/* Make sure the bdev was not unregistered. We still have a
425 	 * descriptor open */
426 	CU_ASSERT(done == false);
427 
428 	spdk_bdev_close(desc);
429 	poll_threads();
430 	desc = NULL;
431 
432 	/* The unregister should have completed */
433 	CU_ASSERT(done == true);
434 
435 
436 	/* Register the bdev again */
437 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
438 
439 	remove_notify = false;
440 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
441 	SPDK_CU_ASSERT_FATAL(desc != NULL);
442 	CU_ASSERT(remove_notify == false);
443 
444 	/* There is an open descriptor on the device. Unregister it,
445 	 * which can't proceed until the descriptor is closed. */
446 	done = false;
447 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
448 	/* No polling has occurred, so neither of these should execute */
449 	CU_ASSERT(remove_notify == false);
450 	CU_ASSERT(done == false);
451 
452 	/* Prior to the unregister completing, close the descriptor */
453 	spdk_bdev_close(desc);
454 
455 	/* Poll the threads to allow all events to be processed */
456 	poll_threads();
457 
458 	/* Remove notify should not have been called because the
459 	 * descriptor is already closed. */
460 	CU_ASSERT(remove_notify == false);
461 
462 	/* The unregister should have completed */
463 	CU_ASSERT(done == true);
464 
465 	/* Restore the original g_bdev so that we can use teardown_test(). */
466 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
467 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
468 	teardown_test();
469 }
470 
471 static void
472 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
473 {
474 	bool *done = cb_arg;
475 
476 	CU_ASSERT(success == true);
477 	*done = true;
478 	spdk_bdev_free_io(bdev_io);
479 }
480 
481 static void
482 put_channel_during_reset(void)
483 {
484 	struct spdk_io_channel *io_ch;
485 	bool done = false;
486 
487 	setup_test();
488 
489 	set_thread(0);
490 	io_ch = spdk_bdev_get_io_channel(g_desc);
491 	CU_ASSERT(io_ch != NULL);
492 
493 	/*
494 	 * Start a reset, but then put the I/O channel before
495 	 *  the deferred messages for the reset get a chance to
496 	 *  execute.
497 	 */
498 	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
499 	spdk_put_io_channel(io_ch);
500 	poll_threads();
501 	stub_complete_io(g_bdev.io_target, 0);
502 
503 	teardown_test();
504 }
505 
506 static void
507 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
508 {
509 	enum spdk_bdev_io_status *status = cb_arg;
510 
511 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
512 	spdk_bdev_free_io(bdev_io);
513 }
514 
515 static void
516 aborted_reset(void)
517 {
518 	struct spdk_io_channel *io_ch[2];
519 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
520 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
521 
522 	setup_test();
523 
524 	set_thread(0);
525 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
526 	CU_ASSERT(io_ch[0] != NULL);
527 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
528 	poll_threads();
529 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
530 
531 	/*
532 	 * First reset has been submitted on ch0.  Now submit a second
533 	 *  reset on ch1 which will get queued since there is already a
534 	 *  reset in progress.
535 	 */
536 	set_thread(1);
537 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
538 	CU_ASSERT(io_ch[1] != NULL);
539 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
540 	poll_threads();
541 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
542 
543 	/*
544 	 * Now destroy ch1.  This will abort the queued reset.  Check that
545 	 *  the second reset was completed with failed status.  Also check
546 	 *  that bdev->internal.reset_in_progress != NULL, since the
547 	 *  original reset has not been completed yet.  This ensures that
548 	 *  the bdev code is correctly noticing that the failed reset is
549 	 *  *not* the one that had been submitted to the bdev module.
550 	 */
551 	set_thread(1);
552 	spdk_put_io_channel(io_ch[1]);
553 	poll_threads();
554 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
555 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
556 
557 	/*
558 	 * Now complete the first reset, verify that it completed with SUCCESS
559 	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
560 	 */
561 	set_thread(0);
562 	spdk_put_io_channel(io_ch[0]);
563 	stub_complete_io(g_bdev.io_target, 0);
564 	poll_threads();
565 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
566 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
567 
568 	teardown_test();
569 }
570 
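/* Completion callback that records the I/O's internal status and frees the bdev_io. */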
571 static void
572 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
573 {
574 	enum spdk_bdev_io_status *status = cb_arg;
575 
576 	*status = bdev_io->internal.status;
577 	spdk_bdev_free_io(bdev_io);
578 }
579 
580 static void
581 io_during_reset(void)
582 {
583 	struct spdk_io_channel *io_ch[2];
584 	struct spdk_bdev_channel *bdev_ch[2];
585 	enum spdk_bdev_io_status status0, status1, status_reset;
586 	int rc;
587 
588 	setup_test();
589 
590 	/*
591 	 * First test normal case - submit an I/O on each of two channels (with no resets)
592 	 *  and verify they complete successfully.
593 	 */
594 	set_thread(0);
595 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
596 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
597 	CU_ASSERT(bdev_ch[0]->flags == 0);
598 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
599 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
600 	CU_ASSERT(rc == 0);
601 
602 	set_thread(1);
603 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
604 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
605 	CU_ASSERT(bdev_ch[1]->flags == 0);
606 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
607 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
608 	CU_ASSERT(rc == 0);
609 
610 	poll_threads();
611 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
612 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
613 
614 	set_thread(0);
615 	stub_complete_io(g_bdev.io_target, 0);
616 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
617 
618 	set_thread(1);
619 	stub_complete_io(g_bdev.io_target, 0);
620 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
621 
622 	/*
623 	 * Now submit a reset, and leave it pending while we submit I/O on two different
624 	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
625 	 *  progress.
626 	 */
627 	set_thread(0);
628 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
629 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
630 	CU_ASSERT(rc == 0);
631 
632 	CU_ASSERT(bdev_ch[0]->flags == 0);
633 	CU_ASSERT(bdev_ch[1]->flags == 0);
634 	poll_threads();
635 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
636 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
637 
638 	set_thread(0);
639 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
640 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
641 	CU_ASSERT(rc == 0);
642 
643 	set_thread(1);
644 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
645 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
646 	CU_ASSERT(rc == 0);
647 
648 	/*
649 	 * A reset is in progress, so these read I/O should be completed as aborted.  Note that we
650 	 *  need to poll_threads() since I/O completed inline have their completion deferred.
651 	 */
652 	poll_threads();
653 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
654 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
655 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
656 
657 	/*
658 	 * Complete the reset
659 	 */
660 	set_thread(0);
661 	stub_complete_io(g_bdev.io_target, 0);
662 
663 	/*
664 	 * Only poll thread 0. We should not get a completion.
665 	 */
666 	poll_thread(0);
667 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
668 
669 	/*
670 	 * Poll both thread 0 and 1 so the messages can propagate and we
671 	 * get a completion.
672 	 */
673 	poll_threads();
674 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
675 
676 	spdk_put_io_channel(io_ch[0]);
677 	set_thread(1);
678 	spdk_put_io_channel(io_ch[1]);
679 	poll_threads();
680 
681 	teardown_test();
682 }
683 
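/*
 * Exercise QoS with rate limits applied directly to bdev->internal.qos: I/O submitted
 * on the QoS thread (thread 0) and on another thread, abort of QoS-managed I/O, and
 * teardown/re-creation of the QoS channel as descriptors and I/O channels come and go.
 */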
684 static void
685 basic_qos(void)
686 {
687 	struct spdk_io_channel *io_ch[2];
688 	struct spdk_bdev_channel *bdev_ch[2];
689 	struct spdk_bdev *bdev;
690 	enum spdk_bdev_io_status status, abort_status;
691 	int rc;
692 
693 	setup_test();
694 
695 	/* Enable QoS */
696 	bdev = &g_bdev.bdev;
697 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
698 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
699 	TAILQ_INIT(&bdev->internal.qos->queued);
700 	/*
701 	 * Enable read/write IOPS, read only byte per second and
702 	 * read/write byte per second rate limits.
703 	 * In this case, all rate limits will take equal effect.
704 	 */
705 	/* 2000 read/write I/O per second, or 2 per millisecond */
706 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
707 	/* 8K read/write byte per millisecond with 4K block size */
708 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
709 	/* 8K read only byte per millisecond with 4K block size */
710 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
711 
712 	g_get_io_channel = true;
713 
714 	set_thread(0);
715 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
716 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
717 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
718 
719 	set_thread(1);
720 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
721 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
722 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
723 
724 	/*
725 	 * Send an I/O on thread 0, which is where the QoS thread is running.
726 	 */
727 	set_thread(0);
728 	status = SPDK_BDEV_IO_STATUS_PENDING;
729 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
730 	CU_ASSERT(rc == 0);
731 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
732 	poll_threads();
733 	stub_complete_io(g_bdev.io_target, 0);
734 	poll_threads();
735 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
736 
737 	/* Send an I/O on thread 1. The QoS thread is not running here. */
738 	status = SPDK_BDEV_IO_STATUS_PENDING;
739 	set_thread(1);
740 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
741 	CU_ASSERT(rc == 0);
742 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
743 	poll_threads();
744 	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
745 	stub_complete_io(g_bdev.io_target, 0);
746 	poll_threads();
747 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
748 	/* Now complete I/O on thread 0 */
749 	set_thread(0);
750 	poll_threads();
751 	stub_complete_io(g_bdev.io_target, 0);
752 	poll_threads();
753 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
754 
755 	/* Reset rate limit for the next test cases. */
756 	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
757 	poll_threads();
758 
759 	/*
760 	 * Test abort request when QoS is enabled.
761 	 */
762 
763 	/* Send an I/O on thread 0, which is where the QoS thread is running. */
764 	set_thread(0);
765 	status = SPDK_BDEV_IO_STATUS_PENDING;
766 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
767 	CU_ASSERT(rc == 0);
768 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
769 	/* Send an abort to the I/O on the same thread. */
770 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
771 	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
772 	CU_ASSERT(rc == 0);
773 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
774 	poll_threads();
775 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
776 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
777 
778 	/* Send an I/O on thread 1. The QoS thread is not running here. */
779 	status = SPDK_BDEV_IO_STATUS_PENDING;
780 	set_thread(1);
781 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
782 	CU_ASSERT(rc == 0);
783 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
784 	poll_threads();
785 	/* Send an abort to the I/O on the same thread. */
786 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
787 	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
788 	CU_ASSERT(rc == 0);
789 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
790 	poll_threads();
791 	/* The abort should complete with success and the target I/O as aborted on thread 1. */
792 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
793 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
794 
795 	set_thread(0);
796 
797 	/*
798 	 * Close the descriptor only, which should stop the qos channel since
799 	 * this is the last descriptor being removed.
800 	 */
801 	spdk_bdev_close(g_desc);
802 	poll_threads();
803 	CU_ASSERT(bdev->internal.qos->ch == NULL);
804 
805 	/*
806 	 * Open the bdev again, which should set up the qos channel since the
807 	 * I/O channels are still valid.
808 	 */
809 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
810 	poll_threads();
811 	CU_ASSERT(bdev->internal.qos->ch != NULL);
812 
813 	/* Tear down the channels */
814 	set_thread(0);
815 	spdk_put_io_channel(io_ch[0]);
816 	set_thread(1);
817 	spdk_put_io_channel(io_ch[1]);
818 	poll_threads();
819 	set_thread(0);
820 
821 	/* Close the descriptor, which should stop the qos channel */
822 	spdk_bdev_close(g_desc);
823 	poll_threads();
824 	CU_ASSERT(bdev->internal.qos->ch == NULL);
825 
826 	/* Open the bdev again; no qos channel is set up because there are no valid channels. */
827 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
828 	poll_threads();
829 	CU_ASSERT(bdev->internal.qos->ch == NULL);
830 
831 	/* Create the channels in reverse order. */
832 	set_thread(1);
833 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
834 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
835 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
836 
837 	set_thread(0);
838 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
839 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
840 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
841 
842 	/* Confirm that the qos thread is now thread 1 */
843 	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
844 
845 	/* Tear down the channels */
846 	set_thread(0);
847 	spdk_put_io_channel(io_ch[0]);
848 	set_thread(1);
849 	spdk_put_io_channel(io_ch[1]);
850 	poll_threads();
851 
852 	set_thread(0);
853 
854 	teardown_test();
855 }
856 
857 static void
858 io_during_qos_queue(void)
859 {
860 	struct spdk_io_channel *io_ch[2];
861 	struct spdk_bdev_channel *bdev_ch[2];
862 	struct spdk_bdev *bdev;
863 	enum spdk_bdev_io_status status0, status1, status2;
864 	int rc;
865 
866 	setup_test();
867 	MOCK_SET(spdk_get_ticks, 0);
868 
869 	/* Enable QoS */
870 	bdev = &g_bdev.bdev;
871 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
872 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
873 	TAILQ_INIT(&bdev->internal.qos->queued);
874 	/*
875 	 * Enable read/write IOPS, read only byte per sec, write only
876 	 * byte per sec and read/write byte per sec rate limits.
877 	 * In this case, both read only and write only byte per sec
878 	 * rate limits will take effect.
879 	 */
880 	/* 4000 read/write I/O per second, or 4 per millisecond */
881 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
882 	/* 8K byte per millisecond with 4K block size */
883 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
884 	/* 4K byte per millisecond with 4K block size */
885 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
886 	/* 4K byte per millisecond with 4K block size */
887 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
888 
889 	g_get_io_channel = true;
890 
891 	/* Create channels */
892 	set_thread(0);
893 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
894 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
895 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
896 
897 	set_thread(1);
898 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
899 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
900 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
901 
902 	/* Send two read I/Os */
903 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
904 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
905 	CU_ASSERT(rc == 0);
906 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
907 	set_thread(0);
908 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
909 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
910 	CU_ASSERT(rc == 0);
911 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
912 	/* Send one write I/O */
913 	status2 = SPDK_BDEV_IO_STATUS_PENDING;
914 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
915 	CU_ASSERT(rc == 0);
916 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
917 
918 	/* Complete any I/O that arrived at the disk */
919 	poll_threads();
920 	set_thread(1);
921 	stub_complete_io(g_bdev.io_target, 0);
922 	set_thread(0);
923 	stub_complete_io(g_bdev.io_target, 0);
924 	poll_threads();
925 
926 	/* Only one of the two read I/Os should complete. (logical XOR) */
927 	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
928 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
929 	} else {
930 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
931 	}
932 	/* The write I/O should complete. */
933 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
934 
935 	/* Advance in time by a millisecond */
936 	spdk_delay_us(1000);
937 
938 	/* Complete more I/O */
939 	poll_threads();
940 	set_thread(1);
941 	stub_complete_io(g_bdev.io_target, 0);
942 	set_thread(0);
943 	stub_complete_io(g_bdev.io_target, 0);
944 	poll_threads();
945 
946 	/* Now the second read I/O should be done */
947 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
948 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
949 
950 	/* Tear down the channels */
951 	set_thread(1);
952 	spdk_put_io_channel(io_ch[1]);
953 	set_thread(0);
954 	spdk_put_io_channel(io_ch[0]);
955 	poll_threads();
956 
957 	teardown_test();
958 }
959 
960 static void
961 io_during_qos_reset(void)
962 {
963 	struct spdk_io_channel *io_ch[2];
964 	struct spdk_bdev_channel *bdev_ch[2];
965 	struct spdk_bdev *bdev;
966 	enum spdk_bdev_io_status status0, status1, reset_status;
967 	int rc;
968 
969 	setup_test();
970 	MOCK_SET(spdk_get_ticks, 0);
971 
972 	/* Enable QoS */
973 	bdev = &g_bdev.bdev;
974 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
975 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
976 	TAILQ_INIT(&bdev->internal.qos->queued);
977 	/*
978 	 * Enable read/write IOPS, write only byte per sec and
979 	 * read/write byte per second rate limits.
980 	 * In this case, read/write byte per second rate limit will
981 	 * take effect first.
982 	 */
983 	/* 2000 read/write I/O per second, or 2 per millisecond */
984 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
985 	/* 4K byte per millisecond with 4K block size */
986 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
987 	/* 8K byte per millisecond with 4K block size */
988 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
989 
990 	g_get_io_channel = true;
991 
992 	/* Create channels */
993 	set_thread(0);
994 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
995 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
996 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
997 
998 	set_thread(1);
999 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1000 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1001 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1002 
1003 	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
1004 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1005 	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1006 	CU_ASSERT(rc == 0);
1007 	set_thread(0);
1008 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1009 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1010 	CU_ASSERT(rc == 0);
1011 
1012 	poll_threads();
1013 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1014 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1015 
1016 	/* Reset the bdev. */
1017 	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
1018 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
1019 	CU_ASSERT(rc == 0);
1020 
1021 	/* Complete any I/O that arrived at the disk */
1022 	poll_threads();
1023 	set_thread(1);
1024 	stub_complete_io(g_bdev.io_target, 0);
1025 	set_thread(0);
1026 	stub_complete_io(g_bdev.io_target, 0);
1027 	poll_threads();
1028 
1029 	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1030 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
1031 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
1032 
1033 	/* Tear down the channels */
1034 	set_thread(1);
1035 	spdk_put_io_channel(io_ch[1]);
1036 	set_thread(0);
1037 	spdk_put_io_channel(io_ch[0]);
1038 	poll_threads();
1039 
1040 	teardown_test();
1041 }
1042 
1043 static void
1044 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1045 {
1046 	enum spdk_bdev_io_status *status = cb_arg;
1047 
1048 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
1049 	spdk_bdev_free_io(bdev_io);
1050 }
1051 
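/*
 * Exercise the ENOMEM retry path: limit the stub channel's avail_cnt so that extra I/O
 * queues on the shared_resource nomem_io list, then verify the retry behavior around
 * nomem_threshold and that a reset drains everything.
 */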
1052 static void
1053 enomem(void)
1054 {
1055 	struct spdk_io_channel *io_ch;
1056 	struct spdk_bdev_channel *bdev_ch;
1057 	struct spdk_bdev_shared_resource *shared_resource;
1058 	struct ut_bdev_channel *ut_ch;
1059 	const uint32_t IO_ARRAY_SIZE = 64;
1060 	const uint32_t AVAIL = 20;
1061 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
1062 	uint32_t nomem_cnt, i;
1063 	struct spdk_bdev_io *first_io;
1064 	int rc;
1065 
1066 	setup_test();
1067 
1068 	set_thread(0);
1069 	io_ch = spdk_bdev_get_io_channel(g_desc);
1070 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1071 	shared_resource = bdev_ch->shared_resource;
1072 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1073 	ut_ch->avail_cnt = AVAIL;
1074 
1075 	/* First submit a number of IOs equal to what the channel can support. */
1076 	for (i = 0; i < AVAIL; i++) {
1077 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1078 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1079 		CU_ASSERT(rc == 0);
1080 	}
1081 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1082 
1083 	/*
1084 	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
1085 	 *  the nomem_io list.
1086 	 */
1087 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1088 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1089 	CU_ASSERT(rc == 0);
1090 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1091 	first_io = TAILQ_FIRST(&shared_resource->nomem_io);
1092 
1093 	/*
1094 	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
1095 	 *  the first_io above.
1096 	 */
1097 	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
1098 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1099 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1100 		CU_ASSERT(rc == 0);
1101 	}
1102 
1103 	/* Assert that first_io is still at the head of the list. */
1104 	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
1105 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
1106 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1107 	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
1108 
1109 	/*
1110 	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
1111 	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
1112 	 *  list.
1113 	 */
1114 	stub_complete_io(g_bdev.io_target, 1);
1115 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1116 
1117 	/*
1118 	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
1119 	 *  and we should see I/O get resubmitted to the test bdev module.
1120 	 */
1121 	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
1122 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
1123 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1124 
1125 	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
1126 	stub_complete_io(g_bdev.io_target, 1);
1127 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1128 
1129 	/*
1130 	 * Send a reset and confirm that all I/O are completed, including the ones that
1131 	 *  were queued on the nomem_io list.
1132 	 */
1133 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
1134 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
1135 	poll_threads();
1136 	CU_ASSERT(rc == 0);
1137 	/* This will complete the reset. */
1138 	stub_complete_io(g_bdev.io_target, 0);
1139 
1140 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
1141 	CU_ASSERT(shared_resource->io_outstanding == 0);
1142 
1143 	spdk_put_io_channel(io_ch);
1144 	poll_threads();
1145 	teardown_test();
1146 }
1147 
1148 static void
1149 enomem_multi_bdev(void)
1150 {
1151 	struct spdk_io_channel *io_ch;
1152 	struct spdk_bdev_channel *bdev_ch;
1153 	struct spdk_bdev_shared_resource *shared_resource;
1154 	struct ut_bdev_channel *ut_ch;
1155 	const uint32_t IO_ARRAY_SIZE = 64;
1156 	const uint32_t AVAIL = 20;
1157 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1158 	uint32_t i;
1159 	struct ut_bdev *second_bdev;
1160 	struct spdk_bdev_desc *second_desc = NULL;
1161 	struct spdk_bdev_channel *second_bdev_ch;
1162 	struct spdk_io_channel *second_ch;
1163 	int rc;
1164 
1165 	setup_test();
1166 
1167 	/* Register second bdev with the same io_target  */
1168 	second_bdev = calloc(1, sizeof(*second_bdev));
1169 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1170 	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
1171 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1172 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1173 
1174 	set_thread(0);
1175 	io_ch = spdk_bdev_get_io_channel(g_desc);
1176 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1177 	shared_resource = bdev_ch->shared_resource;
1178 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1179 	ut_ch->avail_cnt = AVAIL;
1180 
1181 	second_ch = spdk_bdev_get_io_channel(second_desc);
1182 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1183 	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
1184 
1185 	/* Saturate io_target through bdev A. */
1186 	for (i = 0; i < AVAIL; i++) {
1187 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1188 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1189 		CU_ASSERT(rc == 0);
1190 	}
1191 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1192 
1193 	/*
1194 	 * Now submit I/O through the second bdev. This should fail with ENOMEM
1195 	 * and then go onto the nomem_io list.
1196 	 */
1197 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1198 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1199 	CU_ASSERT(rc == 0);
1200 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1201 
1202 	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
1203 	stub_complete_io(g_bdev.io_target, AVAIL);
1204 
1205 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1206 	CU_ASSERT(shared_resource->io_outstanding == 1);
1207 
1208 	/* Now complete our retried I/O  */
1209 	stub_complete_io(g_bdev.io_target, 1);
1210 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1211 
1212 	spdk_put_io_channel(io_ch);
1213 	spdk_put_io_channel(second_ch);
1214 	spdk_bdev_close(second_desc);
1215 	unregister_bdev(second_bdev);
1216 	poll_threads();
1217 	free(second_bdev);
1218 	teardown_test();
1219 }
1220 
1221 
1222 static void
1223 enomem_multi_io_target(void)
1224 {
1225 	struct spdk_io_channel *io_ch;
1226 	struct spdk_bdev_channel *bdev_ch;
1227 	struct ut_bdev_channel *ut_ch;
1228 	const uint32_t IO_ARRAY_SIZE = 64;
1229 	const uint32_t AVAIL = 20;
1230 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1231 	uint32_t i;
1232 	int new_io_device;
1233 	struct ut_bdev *second_bdev;
1234 	struct spdk_bdev_desc *second_desc = NULL;
1235 	struct spdk_bdev_channel *second_bdev_ch;
1236 	struct spdk_io_channel *second_ch;
1237 	int rc;
1238 
1239 	setup_test();
1240 
1241 	/* Create new io_target and a second bdev using it */
1242 	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1243 				sizeof(struct ut_bdev_channel), NULL);
1244 	second_bdev = calloc(1, sizeof(*second_bdev));
1245 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1246 	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
1247 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1248 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1249 
1250 	set_thread(0);
1251 	io_ch = spdk_bdev_get_io_channel(g_desc);
1252 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1253 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1254 	ut_ch->avail_cnt = AVAIL;
1255 
1256 	/* Different io_target should imply a different shared_resource */
1257 	second_ch = spdk_bdev_get_io_channel(second_desc);
1258 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1259 	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1260 
1261 	/* Saturate io_target through bdev A. */
1262 	for (i = 0; i < AVAIL; i++) {
1263 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1264 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1265 		CU_ASSERT(rc == 0);
1266 	}
1267 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1268 
1269 	/* Issue one more I/O to fill ENOMEM list. */
1270 	/* Issue one more I/O to fill the ENOMEM list. */
1271 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1272 	CU_ASSERT(rc == 0);
1273 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1274 
1275 	/*
1276 	 * Now submit I/O through the second bdev. This should go through and complete
1277 	 * successfully because we're using a different io_device underneath.
1278 	 */
1279 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1280 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1281 	CU_ASSERT(rc == 0);
1282 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1283 	stub_complete_io(second_bdev->io_target, 1);
1284 
1285 	/* Cleanup; Complete outstanding I/O. */
1286 	stub_complete_io(g_bdev.io_target, AVAIL);
1287 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1288 	/* Complete the ENOMEM I/O */
1289 	stub_complete_io(g_bdev.io_target, 1);
1290 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1291 
1292 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1293 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1294 	spdk_put_io_channel(io_ch);
1295 	spdk_put_io_channel(second_ch);
1296 	spdk_bdev_close(second_desc);
1297 	unregister_bdev(second_bdev);
1298 	spdk_io_device_unregister(&new_io_device, NULL);
1299 	poll_threads();
1300 	free(second_bdev);
1301 	teardown_test();
1302 }
1303 
1304 static void
1305 qos_dynamic_enable_done(void *cb_arg, int status)
1306 {
1307 	int *rc = cb_arg;
1308 	*rc = status;
1309 }
1310 
1311 static void
1312 qos_dynamic_enable(void)
1313 {
1314 	struct spdk_io_channel *io_ch[2];
1315 	struct spdk_bdev_channel *bdev_ch[2];
1316 	struct spdk_bdev *bdev;
1317 	enum spdk_bdev_io_status bdev_io_status[2];
1318 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
1319 	int status, second_status, rc, i;
1320 
1321 	setup_test();
1322 	MOCK_SET(spdk_get_ticks, 0);
1323 
1324 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1325 		limits[i] = UINT64_MAX;
1326 	}
1327 
1328 	bdev = &g_bdev.bdev;
1329 
1330 	g_get_io_channel = true;
1331 
1332 	/* Create channels */
1333 	set_thread(0);
1334 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1335 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1336 	CU_ASSERT(bdev_ch[0]->flags == 0);
1337 
1338 	set_thread(1);
1339 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1340 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1341 	CU_ASSERT(bdev_ch[1]->flags == 0);
1342 
1343 	set_thread(0);
1344 
1345 	/*
1346 	 * Enable QoS: Read/Write IOPS, Read/Write byte,
1347 	 * Read only byte and Write only byte per second
1348 	 * rate limits.
1349 	 * More than 10 I/Os allowed per timeslice.
1350 	 */
1351 	status = -1;
1352 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1353 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
1354 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
1355 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
1356 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1357 	poll_threads();
1358 	CU_ASSERT(status == 0);
1359 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1360 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1361 
1362 	/*
1363 	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
1364 	 * Additional I/O will then be queued.
1365 	 */
1366 	set_thread(0);
1367 	for (i = 0; i < 10; i++) {
1368 		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1369 		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1370 		CU_ASSERT(rc == 0);
1371 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1372 		poll_thread(0);
1373 		stub_complete_io(g_bdev.io_target, 0);
1374 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1375 	}
1376 
1377 	/*
1378 	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
1379 	 * filled already.  We want to test that when QoS is disabled that these two I/O:
1380 	 *  1) are not aborted
1381 	 *  2) are sent back to their original thread for resubmission
1382 	 */
1383 	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1384 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1385 	CU_ASSERT(rc == 0);
1386 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1387 	set_thread(1);
1388 	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
1389 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
1390 	CU_ASSERT(rc == 0);
1391 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1392 	poll_threads();
1393 
1394 	/*
1395 	 * Disable QoS: Read/Write IOPS, Read/Write byte,
1396 	 * Read only byte rate limits
1397 	 */
1398 	status = -1;
1399 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1400 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
1401 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
1402 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1403 	poll_threads();
1404 	CU_ASSERT(status == 0);
1405 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1406 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1407 
1408 	/* Disable QoS: Write only Byte per second rate limit */
1409 	status = -1;
1410 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
1411 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1412 	poll_threads();
1413 	CU_ASSERT(status == 0);
1414 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1415 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1416 
1417 	/*
1418 	 * All I/O should have been resubmitted back on their original thread.  Complete
1419 	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
1420 	 */
1421 	set_thread(0);
1422 	stub_complete_io(g_bdev.io_target, 0);
1423 	poll_threads();
1424 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1425 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1426 
1427 	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
1428 	set_thread(1);
1429 	stub_complete_io(g_bdev.io_target, 0);
1430 	poll_threads();
1431 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
1432 
1433 	/* Disable QoS again */
1434 	status = -1;
1435 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1436 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1437 	poll_threads();
1438 	CU_ASSERT(status == 0); /* This should succeed */
1439 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1440 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1441 
1442 	/* Enable QoS on thread 0 */
1443 	status = -1;
1444 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1445 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1446 	poll_threads();
1447 	CU_ASSERT(status == 0);
1448 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1449 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1450 
1451 	/* Disable QoS on thread 1 */
1452 	set_thread(1);
1453 	status = -1;
1454 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1455 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1456 	/* Don't poll yet. This should leave the channels with QoS enabled */
1457 	CU_ASSERT(status == -1);
1458 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1459 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1460 
1461 	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
1462 	second_status = 0;
1463 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
1464 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
1465 	poll_threads();
1466 	CU_ASSERT(status == 0); /* The disable should succeed */
1467 	CU_ASSERT(second_status < 0); /* The enable should fail */
1468 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1469 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1470 
1471 	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
1472 	status = -1;
1473 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1474 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1475 	poll_threads();
1476 	CU_ASSERT(status == 0);
1477 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1478 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1479 
1480 	/* Tear down the channels */
1481 	set_thread(0);
1482 	spdk_put_io_channel(io_ch[0]);
1483 	set_thread(1);
1484 	spdk_put_io_channel(io_ch[1]);
1485 	poll_threads();
1486 
1487 	set_thread(0);
1488 	teardown_test();
1489 }
1490 
1491 static void
1492 histogram_status_cb(void *cb_arg, int status)
1493 {
1494 	g_status = status;
1495 }
1496 
1497 static void
1498 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1499 {
1500 	g_status = status;
1501 	g_histogram = histogram;
1502 }
1503 
1504 static void
1505 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1506 		   uint64_t total, uint64_t so_far)
1507 {
1508 	g_count += count;
1509 }
1510 
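/* Verify that an enabled histogram aggregates latency data from I/O completed on
 * multiple threads/channels. */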
1511 static void
1512 bdev_histograms_mt(void)
1513 {
1514 	struct spdk_io_channel *ch[2];
1515 	struct spdk_histogram_data *histogram;
1516 	uint8_t buf[4096];
1517 	int status = false;
1518 	int rc;
1519 
1520 
1521 	setup_test();
1522 
1523 	set_thread(0);
1524 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1525 	CU_ASSERT(ch[0] != NULL);
1526 
1527 	set_thread(1);
1528 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1529 	CU_ASSERT(ch[1] != NULL);
1530 
1531 
1532 	/* Enable histogram */
1533 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
1534 	poll_threads();
1535 	CU_ASSERT(g_status == 0);
1536 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1537 
1538 	/* Allocate histogram */
1539 	histogram = spdk_histogram_data_alloc();
1540 
1541 	/* Check if histogram is zeroed */
1542 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1543 	poll_threads();
1544 	CU_ASSERT(g_status == 0);
1545 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1546 
1547 	g_count = 0;
1548 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1549 
1550 	CU_ASSERT(g_count == 0);
1551 
1552 	set_thread(0);
1553 	rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
1554 	CU_ASSERT(rc == 0);
1555 
1556 	spdk_delay_us(10);
1557 	stub_complete_io(g_bdev.io_target, 1);
1558 	poll_threads();
1559 	CU_ASSERT(status == true);
1560 
1561 
1562 	set_thread(1);
1563 	rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
1564 	CU_ASSERT(rc == 0);
1565 
1566 	spdk_delay_us(10);
1567 	stub_complete_io(g_bdev.io_target, 1);
1568 	poll_threads();
1569 	CU_ASSERT(status == true);
1570 
1571 	set_thread(0);
1572 
1573 	/* Check if histogram gathered data from all I/O channels */
1574 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1575 	poll_threads();
1576 	CU_ASSERT(g_status == 0);
1577 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1578 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1579 
1580 	g_count = 0;
1581 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1582 	CU_ASSERT(g_count == 2);
1583 
1584 	/* Disable histogram */
1585 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1586 	poll_threads();
1587 	CU_ASSERT(g_status == 0);
1588 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1589 
1590 	spdk_histogram_data_free(histogram);
1591 
1592 	/* Tear down the channels */
1593 	set_thread(0);
1594 	spdk_put_io_channel(ch[0]);
1595 	set_thread(1);
1596 	spdk_put_io_channel(ch[1]);
1597 	poll_threads();
1598 	set_thread(0);
1599 	teardown_test();
1600 
1601 }
1602 
1603 struct timeout_io_cb_arg {
1604 	struct iovec iov;
1605 	uint8_t type;
1606 };
1607 
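/* Count the I/O currently on the channel's io_submitted list; returns -1 if ch is NULL. */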
1608 static int
1609 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
1610 {
1611 	struct spdk_bdev_io *bdev_io;
1612 	int n = 0;
1613 
1614 	if (!ch) {
1615 		return -1;
1616 	}
1617 
1618 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
1619 		n++;
1620 	}
1621 
1622 	return n;
1623 }
1624 
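/* Timeout callback: record the type and first iovec of the timed-out I/O into the
 * caller's timeout_io_cb_arg so the test can assert on them. */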
1625 static void
1626 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
1627 {
1628 	struct timeout_io_cb_arg *ctx = cb_arg;
1629 
1630 	ctx->type = bdev_io->type;
1631 	ctx->iov.iov_base = bdev_io->iov.iov_base;
1632 	ctx->iov.iov_len = bdev_io->iov.iov_len;
1633 }
1634 
1635 static bool g_io_done;
1636 
1637 static void
1638 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1639 {
1640 	g_io_done = true;
1641 	spdk_bdev_free_io(bdev_io);
1642 }
1643 
1644 static void
1645 bdev_set_io_timeout_mt(void)
1646 {
1647 	struct spdk_io_channel *ch[3];
1648 	struct spdk_bdev_channel *bdev_ch[3];
1649 	struct timeout_io_cb_arg cb_arg;
1650 
1651 	setup_test();
1652 
1653 	g_bdev.bdev.optimal_io_boundary = 16;
1654 	g_bdev.bdev.split_on_optimal_io_boundary = true;
1655 
1656 	set_thread(0);
1657 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1658 	CU_ASSERT(ch[0] != NULL);
1659 
1660 	set_thread(1);
1661 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1662 	CU_ASSERT(ch[1] != NULL);
1663 
1664 	set_thread(2);
1665 	ch[2] = spdk_bdev_get_io_channel(g_desc);
1666 	CU_ASSERT(ch[2] != NULL);
1667 
1668 	/* Multi-thread mode
1669 	 * 1, Check the poller was registered successfully
1670 	 * 2, Check the timeout IO and ensure the IO was the one submitted by the user
1671 	 * 3, Check that the link in the bdev_ch works right.
1672 	 * 4, Close the desc and put the io channel while the timeout poller is polling
1673 	 */
1674 
1675 	/* Set the timeout on the thread that owns the desc */
1676 	set_thread(0);
1677 	CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
1678 	CU_ASSERT(g_desc->io_timeout_poller != NULL);
1679 	CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
1680 	CU_ASSERT(g_desc->cb_arg == &cb_arg);
1681 
1682 	/* Check the submitted I/O list and the timeout handler */
1683 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
1684 	bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
1685 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
1686 
1687 	set_thread(1);
1688 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
1689 	bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
1690 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
1691 
1692 	/* Now submit a single-vector read that is split on the optimal_io_boundary of 16.
1693 	 * Offset 14, length 8, payload 0xF000
1694 	 *  Child - Offset 14, length 2, payload 0xF000
1695 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1696 	 *
1697 	 * The io_submitted list should then hold the parent plus both children (3 I/Os).
1698 	 */
1699 	set_thread(2);
1700 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
1701 	bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
1702 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
1703 
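	/* Advance the clock, but not past the 5 second timeout; no I/O should be reported as timed out yet. */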
1704 	set_thread(0);
1705 	memset(&cb_arg, 0, sizeof(cb_arg));
1706 	spdk_delay_us(3 * spdk_get_ticks_hz());
1707 	poll_threads();
1708 	CU_ASSERT(cb_arg.type == 0);
1709 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
1710 	CU_ASSERT(cb_arg.iov.iov_len == 0);
1711 
1712 	/* Now the elapsed time reaches the timeout limit */
1713 	spdk_delay_us(3 * spdk_get_ticks_hz());
1714 	poll_thread(0);
1715 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
1716 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
1717 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
1718 	stub_complete_io(g_bdev.io_target, 1);
1719 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
1720 
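	/* The write submitted on thread 1 has also timed out; its callback fires when thread 1 is polled. */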
1721 	memset(&cb_arg, 0, sizeof(cb_arg));
1722 	set_thread(1);
1723 	poll_thread(1);
1724 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
1725 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
1726 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
1727 	stub_complete_io(g_bdev.io_target, 1);
1728 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
1729 
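	/* On thread 2 the timeout callback reports the parent of the split read (offset 14, length 8).
	 * Completing the two children then empties the submitted list.
	 */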
1730 	memset(&cb_arg, 0, sizeof(cb_arg));
1731 	set_thread(2);
1732 	poll_thread(2);
1733 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
1734 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
1735 	CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
1736 	stub_complete_io(g_bdev.io_target, 1);
1737 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
1738 	stub_complete_io(g_bdev.io_target, 1);
1739 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
1740 
1741 	/* Run poll_timeout_done() to complete this round of the timeout poller */
1742 	set_thread(0);
1743 	poll_thread(0);
1744 	CU_ASSERT(g_desc->refs == 0);
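	/* Submit one new I/O on each thread; these are the candidates for the next timeout round. */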
1745 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
1746 	set_thread(1);
1747 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
1748 	set_thread(2);
1749 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
1750 
1751 	/* Trigger the timeout poller to run again; desc->refs is incremented.
1752 	 * On thread 0 we destroy the io channel before the timeout poller runs,
1753 	 * so the timeout callback is not called on thread 0.
1754 	 */
1755 	spdk_delay_us(6 * spdk_get_ticks_hz());
1756 	memset(&cb_arg, 0, sizeof(cb_arg));
1757 	set_thread(0);
1758 	stub_complete_io(g_bdev.io_target, 1);
1759 	spdk_put_io_channel(ch[0]);
1760 	poll_thread(0);
1761 	CU_ASSERT(g_desc->refs == 1);
1762 	CU_ASSERT(cb_arg.type == 0);
1763 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
1764 	CU_ASSERT(cb_arg.iov.iov_len == 0);
1765 
1766 	/* On thread 1 the timeout poller runs first and then we destroy the io channel,
1767 	 * so the timeout callback is called on thread 1.
1768 	 */
1769 	memset(&cb_arg, 0, sizeof(cb_arg));
1770 	set_thread(1);
1771 	poll_thread(1);
1772 	stub_complete_io(g_bdev.io_target, 1);
1773 	spdk_put_io_channel(ch[1]);
1774 	poll_thread(1);
1775 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
1776 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
1777 	CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
1778 
1779 	/* Close the desc.
1780 	 * This unregisters the timeout poller first and then decrements desc->refs,
1781 	 * but refs is not zero yet, so the desc is not freed.
1782 	 */
1783 	set_thread(0);
1784 	spdk_bdev_close(g_desc);
1785 	CU_ASSERT(g_desc->refs == 1);
1786 	CU_ASSERT(g_desc->io_timeout_poller == NULL);
1787 
1788 	/* The timeout poller runs on thread 2 and then we destroy the io channel.
1789 	 * The desc is already closed, so the timeout poller exits immediately and
1790 	 * the timeout callback is not called on thread 2.
1791 	 */
1792 	memset(&cb_arg, 0, sizeof(cb_arg));
1793 	set_thread(2);
1794 	poll_thread(2);
1795 	stub_complete_io(g_bdev.io_target, 1);
1796 	spdk_put_io_channel(ch[2]);
1797 	poll_thread(2);
1798 	CU_ASSERT(cb_arg.type == 0);
1799 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
1800 	CU_ASSERT(cb_arg.iov.iov_len == 0);
1801 
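	/* The desc was already closed above, so tear down manually rather than via teardown_test(). */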
1802 	set_thread(0);
1803 	poll_thread(0);
1804 	g_teardown_done = false;
1805 	unregister_bdev(&g_bdev);
1806 	spdk_io_device_unregister(&g_io_device, NULL);
1807 	spdk_bdev_finish(finish_cb, NULL);
1808 	poll_threads();
1809 	memset(&g_bdev, 0, sizeof(g_bdev));
1810 	CU_ASSERT(g_teardown_done == true);
1811 	g_teardown_done = false;
1812 	free_threads();
1813 	free_cores();
1814 }
1815 
1816 static bool g_io_done2;
1817 static bool g_lock_lba_range_done;
1818 static bool g_unlock_lba_range_done;
1819 
1820 static void
1821 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1822 {
1823 	g_io_done2 = true;
1824 	spdk_bdev_free_io(bdev_io);
1825 }
1826 
1827 static void
1828 lock_lba_range_done(void *ctx, int status)
1829 {
1830 	g_lock_lba_range_done = true;
1831 }
1832 
1833 static void
1834 unlock_lba_range_done(void *ctx, int status)
1835 {
1836 	g_unlock_lba_range_done = true;
1837 }
1838 
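/* Return the number of I/Os currently outstanding in the stub bdev channel for the calling thread. */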
1839 static uint32_t
1840 stub_channel_outstanding_cnt(void *io_target)
1841 {
1842 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
1843 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
1844 	uint32_t outstanding_cnt;
1845 
1846 	outstanding_cnt = ch->outstanding_cnt;
1847 
1848 	spdk_put_io_channel(_ch);
1849 	return outstanding_cnt;
1850 }
1851 
1852 static void
1853 lock_lba_range_then_submit_io(void)
1854 {
1855 	struct spdk_bdev_desc *desc = NULL;
1856 	void *io_target;
1857 	struct spdk_io_channel *io_ch[3];
1858 	struct spdk_bdev_channel *bdev_ch[3];
1859 	struct lba_range *range;
1860 	char buf[4096];
1861 	int ctx0, ctx1, ctx2;
1862 	int rc;
1863 
1864 	setup_test();
1865 
1866 	io_target = g_bdev.io_target;
1867 	desc = g_desc;
1868 
1869 	set_thread(0);
1870 	io_ch[0] = spdk_bdev_get_io_channel(desc);
1871 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1872 	CU_ASSERT(io_ch[0] != NULL);
1873 
1874 	set_thread(1);
1875 	io_ch[1] = spdk_bdev_get_io_channel(desc);
1876 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1877 	CU_ASSERT(io_ch[1] != NULL);
1878 
1879 	set_thread(0);
1880 	g_lock_lba_range_done = false;
1881 	rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
1882 	CU_ASSERT(rc == 0);
1883 	poll_threads();
1884 
1885 	/* The lock should immediately become valid, since there are no outstanding
1886 	 * write I/O.
1887 	 */
1888 	CU_ASSERT(g_lock_lba_range_done == true);
1889 	range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
1890 	SPDK_CU_ASSERT_FATAL(range != NULL);
1891 	CU_ASSERT(range->offset == 20);
1892 	CU_ASSERT(range->length == 10);
1893 	CU_ASSERT(range->owner_ch == bdev_ch[0]);
1894 
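	/* Submit a read overlapping the locked range from the channel holding the lock; it executes immediately. */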
1895 	g_io_done = false;
1896 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
1897 	rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
1898 	CU_ASSERT(rc == 0);
1899 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
1900 
1901 	stub_complete_io(io_target, 1);
1902 	poll_threads();
1903 	CU_ASSERT(g_io_done == true);
1904 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
1905 
1906 	/* Try a write I/O.  This should actually be allowed to execute, since the channel
1907 	 * holding the lock is submitting the write I/O.
1908 	 */
1909 	g_io_done = false;
1910 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
1911 	rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
1912 	CU_ASSERT(rc == 0);
1913 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
1914 
1915 	stub_complete_io(io_target, 1);
1916 	poll_threads();
1917 	CU_ASSERT(g_io_done == true);
1918 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
1919 
1920 	/* Try a write I/O.  This should get queued in the io_locked tailq. */
1921 	set_thread(1);
1922 	g_io_done = false;
1923 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
1924 	rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
1925 	CU_ASSERT(rc == 0);
1926 	poll_threads();
1927 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
1928 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
1929 	CU_ASSERT(g_io_done == false);
1930 
1931 	/* Try to unlock the lba range using thread 1's io_ch.  This should fail. */
1932 	rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
1933 	CU_ASSERT(rc == -EINVAL);
1934 
1935 	/* Now create a new channel and submit a write I/O with it.  This should also be queued.
1936 	 * The new channel should inherit the active locks from the bdev's internal list.
1937 	 */
1938 	set_thread(2);
1939 	io_ch[2] = spdk_bdev_get_io_channel(desc);
1940 	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
1941 	CU_ASSERT(io_ch[2] != NULL);
1942 
1943 	g_io_done2 = false;
1944 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
1945 	rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
1946 	CU_ASSERT(rc == 0);
1947 	poll_threads();
1948 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
1949 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
1950 	CU_ASSERT(g_io_done2 == false);
1951 
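	/* Unlock the range from the owning channel (thread 0). */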
1952 	set_thread(0);
1953 	rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
1954 	CU_ASSERT(rc == 0);
1955 	poll_threads();
1956 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
1957 
1958 	/* The LBA range is unlocked, so the write IOs should now have started execution. */
1959 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
1960 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
1961 
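	/* Complete the write I/Os that were released from the io_locked queues. */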
1962 	set_thread(1);
1963 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
1964 	stub_complete_io(io_target, 1);
1965 	set_thread(2);
1966 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
1967 	stub_complete_io(io_target, 1);
1968 
1969 	poll_threads();
1970 	CU_ASSERT(g_io_done == true);
1971 	CU_ASSERT(g_io_done2 == true);
1972 
1973 	/* Tear down the channels */
1974 	set_thread(0);
1975 	spdk_put_io_channel(io_ch[0]);
1976 	set_thread(1);
1977 	spdk_put_io_channel(io_ch[1]);
1978 	set_thread(2);
1979 	spdk_put_io_channel(io_ch[2]);
1980 	poll_threads();
1981 	set_thread(0);
1982 	teardown_test();
1983 }
1984 
1985 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel().
1986  * spdk_bdev_unregister() eventually calls spdk_io_device_unregister(). However,
1987  * spdk_io_device_unregister() fails if it is called while spdk_for_each_channel() is executing.
1988  * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset()
1989  * completes. Test this behavior.
1990  */
1991 static void
1992 unregister_during_reset(void)
1993 {
1994 	struct spdk_io_channel *io_ch[2];
1995 	bool done_reset = false, done_unregister = false;
1996 	int rc;
1997 
1998 	setup_test();
1999 	set_thread(0);
2000 
2001 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
2002 	SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL);
2003 
2004 	set_thread(1);
2005 
2006 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
2007 	SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL);
2008 
2009 	set_thread(0);
2010 
2011 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2012 
2013 	rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset);
2014 	CU_ASSERT(rc == 0);
2015 
2016 	set_thread(0);
2017 
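	/* Poll only once so the reset's spdk_for_each_channel() is still in progress when the bdev is unregistered. */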
2018 	poll_thread_times(0, 1);
2019 
2020 	spdk_bdev_close(g_desc);
2021 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister);
2022 
2023 	CU_ASSERT(done_reset == false);
2024 	CU_ASSERT(done_unregister == false);
2025 
2026 	poll_threads();
2027 
2028 	stub_complete_io(g_bdev.io_target, 0);
2029 
2030 	poll_threads();
2031 
2032 	CU_ASSERT(done_reset == true);
2033 	CU_ASSERT(done_unregister == false);
2034 
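	/* The deferred unregister can finish only after both io channels are released. */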
2035 	spdk_put_io_channel(io_ch[0]);
2036 
2037 	set_thread(1);
2038 
2039 	spdk_put_io_channel(io_ch[1]);
2040 
2041 	poll_threads();
2042 
2043 	CU_ASSERT(done_unregister == true);
2044 
2045 	/* Restore the original g_bdev so that we can use teardown_test(). */
2046 	set_thread(0);
2047 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
2048 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2049 	teardown_test();
2050 }
2051 
2052 int
2053 main(int argc, char **argv)
2054 {
2055 	CU_pSuite	suite = NULL;
2056 	unsigned int	num_failures;
2057 
2058 	CU_set_error_action(CUEA_ABORT);
2059 	CU_initialize_registry();
2060 
2061 	suite = CU_add_suite("bdev", NULL, NULL);
2062 
2063 	CU_ADD_TEST(suite, basic);
2064 	CU_ADD_TEST(suite, unregister_and_close);
2065 	CU_ADD_TEST(suite, basic_qos);
2066 	CU_ADD_TEST(suite, put_channel_during_reset);
2067 	CU_ADD_TEST(suite, aborted_reset);
2068 	CU_ADD_TEST(suite, io_during_reset);
2069 	CU_ADD_TEST(suite, io_during_qos_queue);
2070 	CU_ADD_TEST(suite, io_during_qos_reset);
2071 	CU_ADD_TEST(suite, enomem);
2072 	CU_ADD_TEST(suite, enomem_multi_bdev);
2073 	CU_ADD_TEST(suite, enomem_multi_io_target);
2074 	CU_ADD_TEST(suite, qos_dynamic_enable);
2075 	CU_ADD_TEST(suite, bdev_histograms_mt);
2076 	CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
2077 	CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
2078 	CU_ADD_TEST(suite, unregister_during_reset);
2079 
2080 	CU_basic_set_mode(CU_BRM_VERBOSE);
2081 	CU_basic_run_tests();
2082 	num_failures = CU_get_number_of_failures();
2083 	CU_cleanup_registry();
2084 	return num_failures;
2085 }
2086