1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk_cunit.h"
8 
9 #include "common/lib/ut_multithread.c"
10 #include "unit/lib/json_mock.c"
11 
12 #include "spdk/config.h"
13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
14 #undef SPDK_CONFIG_VTUNE
15 
16 #include "bdev/bdev.c"
17 
18 #define BDEV_UT_NUM_THREADS 3
19 
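/* Stubs for external symbols that bdev.c references but that are not the focus
 * of these multi-thread tests (notify events, SCSI/NVMe translation, memory
 * domains, accel).
 */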
20 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
21 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
22 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk,
23 		int *asc, int *ascq));
24 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
25 	    "test_domain");
26 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
27 	    (struct spdk_memory_domain *domain), 0);
28 
29 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
30 int
31 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
32 			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
33 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
34 {
35 	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
36 
37 	cpl_cb(cpl_cb_arg, 0);
38 	return 0;
39 }
40 
41 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
42 int
43 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
44 			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
45 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
46 {
47 	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
48 
49 	cpl_cb(cpl_cb_arg, 0);
50 	return 0;
51 }
52 
53 static int g_accel_io_device;
54 
55 struct spdk_io_channel *
56 spdk_accel_get_io_channel(void)
57 {
58 	return spdk_get_io_channel(&g_accel_io_device);
59 }
60 
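/* Minimal bdev used as the I/O target in these tests.  Each channel keeps a
 * queue of outstanding I/O so that tests can complete them explicitly via
 * stub_complete_io().
 */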
61 struct ut_bdev {
62 	struct spdk_bdev	bdev;
63 	void			*io_target;
64 };
65 
66 struct ut_bdev_channel {
67 	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
68 	uint32_t			outstanding_cnt;
69 	uint32_t			avail_cnt;
70 };
71 
72 int g_io_device;
73 struct ut_bdev g_bdev;
74 struct spdk_bdev_desc *g_desc;
75 bool g_teardown_done = false;
76 bool g_get_io_channel = true;
77 bool g_create_ch = true;
78 bool g_init_complete_called = false;
79 bool g_fini_start_called = true;
80 int g_status = 0;
81 int g_count = 0;
82 struct spdk_histogram_data *g_histogram = NULL;
83 
84 static int
85 ut_accel_ch_create_cb(void *io_device, void *ctx)
86 {
87 	return 0;
88 }
89 
90 static void
91 ut_accel_ch_destroy_cb(void *io_device, void *ctx)
92 {
93 }
94 
95 static int
96 stub_create_ch(void *io_device, void *ctx_buf)
97 {
98 	struct ut_bdev_channel *ch = ctx_buf;
99 
100 	if (g_create_ch == false) {
101 		return -1;
102 	}
103 
104 	TAILQ_INIT(&ch->outstanding_io);
105 	ch->outstanding_cnt = 0;
106 	/*
107 	 * When avail gets to 0, the submit_request function will return ENOMEM.
108 	 *  Most tests do not want ENOMEM to occur, so by default set this to a
109 	 *  big value that won't get hit.  The ENOMEM tests can then override this
110 	 *  value to something much smaller to induce ENOMEM conditions.
111 	 */
112 	ch->avail_cnt = 2048;
113 	return 0;
114 }
115 
116 static void
117 stub_destroy_ch(void *io_device, void *ctx_buf)
118 {
119 }
120 
121 static struct spdk_io_channel *
122 stub_get_io_channel(void *ctx)
123 {
124 	struct ut_bdev *ut_bdev = ctx;
125 
126 	if (g_get_io_channel == true) {
127 		return spdk_get_io_channel(ut_bdev->io_target);
128 	} else {
129 		return NULL;
130 	}
131 }
132 
133 static int
134 stub_destruct(void *ctx)
135 {
136 	return 0;
137 }
138 
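/* submit_request handler for the stub bdev module.  A RESET aborts everything
 * outstanding on the channel and is then queued like any other I/O, an ABORT
 * completes the targeted I/O (or fails if it cannot be found), and other I/O
 * is queued until avail_cnt is exhausted, at which point it completes with
 * NOMEM.
 */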
139 static void
140 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
141 {
142 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
143 	struct spdk_bdev_io *io;
144 
145 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
146 		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
147 			io = TAILQ_FIRST(&ch->outstanding_io);
148 			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
149 			ch->outstanding_cnt--;
150 			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
151 			ch->avail_cnt++;
152 		}
153 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
154 		TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
155 			if (io == bdev_io->u.abort.bio_to_abort) {
156 				TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
157 				ch->outstanding_cnt--;
158 				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
159 				ch->avail_cnt++;
160 
161 				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
162 				return;
163 			}
164 		}
165 
166 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
167 		return;
168 	}
169 
170 	if (ch->avail_cnt > 0) {
171 		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
172 		ch->outstanding_cnt++;
173 		ch->avail_cnt--;
174 	} else {
175 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
176 	}
177 }
178 
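/* Complete up to num_to_complete I/O queued on the given io_target with
 * SUCCESS.  Passing 0 completes everything.  Returns the number of I/O that
 * were completed.
 */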
179 static uint32_t
180 stub_complete_io(void *io_target, uint32_t num_to_complete)
181 {
182 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
183 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
184 	struct spdk_bdev_io *io;
185 	bool complete_all = (num_to_complete == 0);
186 	uint32_t num_completed = 0;
187 
188 	while (complete_all || num_completed < num_to_complete) {
189 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
190 			break;
191 		}
192 		io = TAILQ_FIRST(&ch->outstanding_io);
193 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
194 		ch->outstanding_cnt--;
195 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
196 		ch->avail_cnt++;
197 		num_completed++;
198 	}
199 	spdk_put_io_channel(_ch);
200 	return num_completed;
201 }
202 
203 static bool
204 stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
205 {
206 	return true;
207 }
208 
209 static struct spdk_bdev_fn_table fn_table = {
210 	.get_io_channel =	stub_get_io_channel,
211 	.destruct =		stub_destruct,
212 	.submit_request =	stub_submit_request,
213 	.io_type_supported =	stub_io_type_supported,
214 };
215 
216 struct spdk_bdev_module bdev_ut_if;
217 
218 static int
219 module_init(void)
220 {
221 	spdk_bdev_module_init_done(&bdev_ut_if);
222 	return 0;
223 }
224 
225 static void
226 module_fini(void)
227 {
228 }
229 
230 static void
231 init_complete(void)
232 {
233 	g_init_complete_called = true;
234 }
235 
236 static void
237 fini_start(void)
238 {
239 	g_fini_start_called = true;
240 }
241 
242 struct spdk_bdev_module bdev_ut_if = {
243 	.name = "bdev_ut",
244 	.module_init = module_init,
245 	.module_fini = module_fini,
246 	.async_init = true,
247 	.init_complete = init_complete,
248 	.fini_start = fini_start,
249 };
250 
251 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
252 
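/* Register a test bdev (4096-byte blocks, 1024 blocks) backed by the given
 * io_target.
 */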
253 static void
254 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
255 {
256 	memset(ut_bdev, 0, sizeof(*ut_bdev));
257 
258 	ut_bdev->io_target = io_target;
259 	ut_bdev->bdev.ctxt = ut_bdev;
260 	ut_bdev->bdev.name = name;
261 	ut_bdev->bdev.fn_table = &fn_table;
262 	ut_bdev->bdev.module = &bdev_ut_if;
263 	ut_bdev->bdev.blocklen = 4096;
264 	ut_bdev->bdev.blockcnt = 1024;
265 
266 	spdk_bdev_register(&ut_bdev->bdev);
267 }
268 
269 static void
270 unregister_bdev(struct ut_bdev *ut_bdev)
271 {
272 	/* Handle any deferred messages. */
273 	poll_threads();
274 	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
275 	/* Handle the async bdev unregister. */
276 	poll_threads();
277 }
278 
279 static void
280 bdev_init_cb(void *done, int rc)
281 {
282 	CU_ASSERT(rc == 0);
283 	*(bool *)done = true;
284 }
285 
286 static void
287 _bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
288 	       void *event_ctx)
289 {
290 	switch (type) {
291 	case SPDK_BDEV_EVENT_REMOVE:
292 		if (event_ctx != NULL) {
293 			*(bool *)event_ctx = true;
294 		}
295 		break;
296 	case SPDK_BDEV_EVENT_RESIZE:
297 		if (event_ctx != NULL) {
298 			*(int *)event_ctx += 1;
299 		}
300 		break;
301 	default:
302 		CU_ASSERT(false);
303 		break;
304 	}
305 }
306 
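/* Common setup: create BDEV_UT_NUM_THREADS threads, initialize the iobuf and
 * bdev layers, register ut_bdev and open a descriptor to it on thread 0.
 */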
307 static void
308 setup_test(void)
309 {
310 	bool done = false;
311 	int rc;
312 
313 	allocate_cores(BDEV_UT_NUM_THREADS);
314 	allocate_threads(BDEV_UT_NUM_THREADS);
315 	set_thread(0);
316 
317 	rc = spdk_iobuf_initialize();
318 	CU_ASSERT(rc == 0);
319 	spdk_bdev_initialize(bdev_init_cb, &done);
320 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
321 				sizeof(struct ut_bdev_channel), NULL);
322 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
323 				ut_accel_ch_destroy_cb, 0, NULL);
324 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
325 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
326 }
327 
328 static void
329 finish_cb(void *cb_arg)
330 {
331 	g_teardown_done = true;
332 }
333 
334 static void
335 teardown_test(void)
336 {
337 	set_thread(0);
338 	g_teardown_done = false;
339 	spdk_bdev_close(g_desc);
340 	g_desc = NULL;
341 	unregister_bdev(&g_bdev);
342 	spdk_io_device_unregister(&g_io_device, NULL);
343 	spdk_bdev_finish(finish_cb, NULL);
344 	spdk_io_device_unregister(&g_accel_io_device, NULL);
345 	spdk_iobuf_finish(finish_cb, NULL);
346 	poll_threads();
347 	memset(&g_bdev, 0, sizeof(g_bdev));
348 	CU_ASSERT(g_teardown_done == true);
349 	g_teardown_done = false;
350 	free_threads();
351 	free_cores();
352 }
353 
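/* Count the entries on an internal bdev_io tailq, e.g. the
 * shared_resource->nomem_io list.
 */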
354 static uint32_t
355 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
356 {
357 	struct spdk_bdev_io *io;
358 	uint32_t cnt = 0;
359 
360 	TAILQ_FOREACH(io, tailq, internal.link) {
361 		cnt++;
362 	}
363 
364 	return cnt;
365 }
366 
367 static void
368 basic(void)
369 {
370 	g_init_complete_called = false;
371 	setup_test();
372 	CU_ASSERT(g_init_complete_called == true);
373 
374 	set_thread(0);
375 
376 	g_get_io_channel = false;
377 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
378 	CU_ASSERT(g_ut_threads[0].ch == NULL);
379 
380 	g_get_io_channel = true;
381 	g_create_ch = false;
382 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
383 	CU_ASSERT(g_ut_threads[0].ch == NULL);
384 
385 	g_get_io_channel = true;
386 	g_create_ch = true;
387 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
388 	CU_ASSERT(g_ut_threads[0].ch != NULL);
389 	spdk_put_io_channel(g_ut_threads[0].ch);
390 
391 	g_fini_start_called = false;
392 	teardown_test();
393 	CU_ASSERT(g_fini_start_called == true);
394 }
395 
396 static void
397 _bdev_unregistered(void *done, int rc)
398 {
399 	CU_ASSERT(rc == 0);
400 	*(bool *)done = true;
401 }
402 
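/* Verify that spdk_bdev_unregister() does not complete while a descriptor is
 * still open, and that no remove notification is delivered to a descriptor
 * that was closed before the unregister was processed.
 */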
403 static void
404 unregister_and_close(void)
405 {
406 	bool done, remove_notify;
407 	struct spdk_bdev_desc *desc = NULL;
408 
409 	setup_test();
410 	set_thread(0);
411 
412 	/* setup_test() automatically opens the bdev,
413 	 * but this test needs to do that in a different
414 	 * way. */
415 	spdk_bdev_close(g_desc);
416 	poll_threads();
417 
418 	/* Try hotremoving a bdev with descriptors which don't provide
419 	 * any context to the notification callback */
420 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
421 	SPDK_CU_ASSERT_FATAL(desc != NULL);
422 
423 	/* There is an open descriptor on the device. Unregister it,
424 	 * which can't proceed until the descriptor is closed. */
425 	done = false;
426 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
427 
428 	/* Poll the threads to allow all events to be processed */
429 	poll_threads();
430 
431 	/* Make sure the bdev was not unregistered. We still have a
432 	 * descriptor open */
433 	CU_ASSERT(done == false);
434 
435 	spdk_bdev_close(desc);
436 	poll_threads();
437 	desc = NULL;
438 
439 	/* The unregister should have completed */
440 	CU_ASSERT(done == true);
441 
442 
443 	/* Register the bdev again */
444 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
445 
446 	remove_notify = false;
447 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
448 	SPDK_CU_ASSERT_FATAL(desc != NULL);
449 	CU_ASSERT(remove_notify == false);
450 
451 	/* There is an open descriptor on the device. Unregister it,
452 	 * which can't proceed until the descriptor is closed. */
453 	done = false;
454 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
455 	/* No polling has occurred, so neither of these should execute */
456 	CU_ASSERT(remove_notify == false);
457 	CU_ASSERT(done == false);
458 
459 	/* Prior to the unregister completing, close the descriptor */
460 	spdk_bdev_close(desc);
461 
462 	/* Poll the threads to allow all events to be processed */
463 	poll_threads();
464 
465 	/* Remove notify should not have been called because the
466 	 * descriptor is already closed. */
467 	CU_ASSERT(remove_notify == false);
468 
469 	/* The unregister should have completed */
470 	CU_ASSERT(done == true);
471 
472 	/* Restore the original g_bdev so that we can use teardown_test(). */
473 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
474 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
475 	teardown_test();
476 }
477 
478 static void
479 unregister_and_close_different_threads(void)
480 {
481 	bool done;
482 	struct spdk_bdev_desc *desc = NULL;
483 
484 	setup_test();
485 	set_thread(0);
486 
487 	/* setup_test() automatically opens the bdev,
488 	 * but this test needs to do that in a different
489 	 * way. */
490 	spdk_bdev_close(g_desc);
491 	poll_threads();
492 
493 	set_thread(1);
494 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
495 	SPDK_CU_ASSERT_FATAL(desc != NULL);
496 	done = false;
497 
498 	set_thread(0);
499 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
500 
501 	/* Poll the threads to allow all events to be processed */
502 	poll_threads();
503 
504 	/* Make sure the bdev was not unregistered. We still have a
505 	 * descriptor open */
506 	CU_ASSERT(done == false);
507 
508 	/* Close the descriptor on thread 1.  Poll the thread and confirm the
509 	 * unregister did not complete, since it was unregistered on thread 0.
510 	 */
511 	set_thread(1);
512 	spdk_bdev_close(desc);
513 	poll_thread(1);
514 	CU_ASSERT(done == false);
515 
516 	/* Now poll thread 0 and confirm the unregister completed. */
517 	set_thread(0);
518 	poll_thread(0);
519 	CU_ASSERT(done == true);
520 
521 	/* Restore the original g_bdev so that we can use teardown_test(). */
522 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
523 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
524 	teardown_test();
525 }
526 
527 static void
528 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
529 {
530 	bool *done = cb_arg;
531 
532 	CU_ASSERT(success == true);
533 	*done = true;
534 	spdk_bdev_free_io(bdev_io);
535 }
536 
537 static void
538 put_channel_during_reset(void)
539 {
540 	struct spdk_io_channel *io_ch;
541 	bool done = false;
542 
543 	setup_test();
544 
545 	set_thread(0);
546 	io_ch = spdk_bdev_get_io_channel(g_desc);
547 	CU_ASSERT(io_ch != NULL);
548 
549 	/*
550 	 * Start a reset, but then put the I/O channel before
551 	 *  the deferred messages for the reset get a chance to
552 	 *  execute.
553 	 */
554 	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
555 	spdk_put_io_channel(io_ch);
556 	poll_threads();
557 	stub_complete_io(g_bdev.io_target, 0);
558 
559 	teardown_test();
560 }
561 
562 static void
563 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
564 {
565 	enum spdk_bdev_io_status *status = cb_arg;
566 
567 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
568 	spdk_bdev_free_io(bdev_io);
569 }
570 
571 static void io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
572 
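/* Verify that destroying a channel aborts a reset still queued behind an
 * in-progress reset, without completing the reset that was already submitted
 * to the module.
 */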
573 static void
574 aborted_reset(void)
575 {
576 	struct spdk_io_channel *io_ch[2];
577 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
578 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
579 
580 	setup_test();
581 
582 	set_thread(0);
583 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
584 	CU_ASSERT(io_ch[0] != NULL);
585 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
586 	poll_threads();
587 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
588 
589 	/*
590 	 * First reset has been submitted on ch0.  Now submit a second
591 	 *  reset on ch1 which will get queued since there is already a
592 	 *  reset in progress.
593 	 */
594 	set_thread(1);
595 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
596 	CU_ASSERT(io_ch[1] != NULL);
597 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
598 	poll_threads();
599 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
600 
601 	/*
602 	 * Now destroy ch1.  This will abort the queued reset.  Check that
603 	 *  the second reset was completed with failed status.  Also check
604 	 *  that bdev->internal.reset_in_progress != NULL, since the
605 	 *  original reset has not been completed yet.  This ensures that
606 	 *  the bdev code is correctly noticing that the failed reset is
607 	 *  *not* the one that had been submitted to the bdev module.
608 	 */
609 	set_thread(1);
610 	spdk_put_io_channel(io_ch[1]);
611 	poll_threads();
612 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
613 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
614 
615 	/*
616 	 * Now complete the first reset, verify that it completed with SUCCESS
617 	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
618 	 */
619 	set_thread(0);
620 	spdk_put_io_channel(io_ch[0]);
621 	stub_complete_io(g_bdev.io_target, 0);
622 	poll_threads();
623 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
624 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
625 
626 	teardown_test();
627 }
628 
629 static void
630 aborted_reset_no_outstanding_io(void)
631 {
632 	struct spdk_io_channel *io_ch[2];
633 	struct spdk_bdev_channel *bdev_ch[2];
634 	struct spdk_bdev *bdev[2];
635 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
636 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
637 
638 	setup_test();
639 
640 	/*
641 	 * This time we test the reset without any outstanding IO
642 	 * present on the bdev channel, so both resets should finish
643 	 * immediately.
644 	 */
645 
646 	set_thread(0);
647 	/* Set reset_io_drain_timeout so the bdev layer waits for outstanding I/O
648 	 * to drain instead of sending the reset down to the module immediately. */
649 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
650 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
651 	bdev[0] = bdev_ch[0]->bdev;
652 	bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
653 	CU_ASSERT(io_ch[0] != NULL);
654 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
655 	poll_threads();
656 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
657 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
658 	spdk_put_io_channel(io_ch[0]);
659 
660 	set_thread(1);
661 	/* Set reset_io_drain_timeout so the bdev layer waits for outstanding I/O
662 	 * to drain instead of sending the reset down to the module immediately. */
663 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
664 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
665 	bdev[1] = bdev_ch[1]->bdev;
666 	bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
667 	CU_ASSERT(io_ch[1] != NULL);
668 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
669 	poll_threads();
670 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
671 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
672 	spdk_put_io_channel(io_ch[1]);
673 
674 	stub_complete_io(g_bdev.io_target, 0);
675 	poll_threads();
676 
677 	teardown_test();
678 }
679 
680 
681 static void
682 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
683 {
684 	enum spdk_bdev_io_status *status = cb_arg;
685 
686 	*status = bdev_io->internal.status;
687 	spdk_bdev_free_io(bdev_io);
688 }
689 
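/* Verify that I/O submitted on any channel while a reset is in progress is
 * failed by the bdev layer with ABORTED status.
 */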
690 static void
691 io_during_reset(void)
692 {
693 	struct spdk_io_channel *io_ch[2];
694 	struct spdk_bdev_channel *bdev_ch[2];
695 	enum spdk_bdev_io_status status0, status1, status_reset;
696 	int rc;
697 
698 	setup_test();
699 
700 	/*
701 	 * First test normal case - submit an I/O on each of two channels (with no resets)
702 	 *  and verify they complete successfully.
703 	 */
704 	set_thread(0);
705 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
706 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
707 	CU_ASSERT(bdev_ch[0]->flags == 0);
708 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
709 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
710 	CU_ASSERT(rc == 0);
711 
712 	set_thread(1);
713 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
714 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
715 	CU_ASSERT(bdev_ch[1]->flags == 0);
716 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
717 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
718 	CU_ASSERT(rc == 0);
719 
720 	poll_threads();
721 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
722 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
723 
724 	set_thread(0);
725 	stub_complete_io(g_bdev.io_target, 0);
726 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
727 
728 	set_thread(1);
729 	stub_complete_io(g_bdev.io_target, 0);
730 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
731 
732 	/*
733 	 * Now submit a reset, and leave it pending while we submit I/O on two different
734 	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
735 	 *  progress.
736 	 */
737 	set_thread(0);
738 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
739 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
740 	CU_ASSERT(rc == 0);
741 
742 	CU_ASSERT(bdev_ch[0]->flags == 0);
743 	CU_ASSERT(bdev_ch[1]->flags == 0);
744 	poll_threads();
745 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
746 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
747 
748 	set_thread(0);
749 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
750 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
751 	CU_ASSERT(rc == 0);
752 
753 	set_thread(1);
754 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
755 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
756 	CU_ASSERT(rc == 0);
757 
758 	/*
759 	 * A reset is in progress so these read I/O should complete with aborted.  Note that we
760 	 *  need to poll_threads() since I/O that are completed inline have their completions deferred.
761 	 */
762 	poll_threads();
763 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
764 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
765 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
766 
767 	/*
768 	 * Complete the reset
769 	 */
770 	set_thread(0);
771 	stub_complete_io(g_bdev.io_target, 0);
772 
773 	/*
774 	 * Only poll thread 0. We should not get a completion.
775 	 */
776 	poll_thread(0);
777 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
778 
779 	/*
780 	 * Poll both thread 0 and 1 so the messages can propagate and we
781 	 * get a completion.
782 	 */
783 	poll_threads();
784 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
785 
786 	spdk_put_io_channel(io_ch[0]);
787 	set_thread(1);
788 	spdk_put_io_channel(io_ch[1]);
789 	poll_threads();
790 
791 	teardown_test();
792 }
793 
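/* Count how many RESET I/O the stub module currently has queued for the given
 * io_target.
 */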
794 static uint32_t
795 count_queued_resets(void *io_target)
796 {
797 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
798 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
799 	struct spdk_bdev_io *io;
800 	uint32_t submitted_resets = 0;
801 
802 	TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
803 		if (io->type == SPDK_BDEV_IO_TYPE_RESET) {
804 			submitted_resets++;
805 		}
806 	}
807 
808 	spdk_put_io_channel(_ch);
809 
810 	return submitted_resets;
811 }
812 
813 static void
814 reset_completions(void)
815 {
816 	struct spdk_io_channel *io_ch;
817 	struct spdk_bdev_channel *bdev_ch;
818 	struct spdk_bdev *bdev;
819 	enum spdk_bdev_io_status status0, status_reset;
820 	int rc, iter;
821 
822 	setup_test();
823 
824 	/* This test covers four test cases:
825 	 * 1) reset_io_drain_timeout of a bdev is greater than 0
826 	 * 2) No outstanding IO are present on any bdev channel
827 	 * 3) Outstanding IO finish during bdev reset
828 	 * 4) Outstanding IO do not finish before reset is done waiting
829 	 *    for them.
830 	 *
831 	 * Above conditions mainly affect the timing of bdev reset completion
832 	 * and whether a reset should be skipped via spdk_bdev_io_complete()
833 	 * or sent down to the underlying bdev module via bdev_io_submit_reset(). */
834 
835 	/* Test preparation */
836 	set_thread(0);
837 	io_ch = spdk_bdev_get_io_channel(g_desc);
838 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
839 	CU_ASSERT(bdev_ch->flags == 0);
840 
841 
842 	/* Test case 1) reset_io_drain_timeout set to 0. Reset should be sent down immediately. */
843 	bdev = &g_bdev.bdev;
844 	bdev->reset_io_drain_timeout = 0;
845 
846 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
847 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
848 	CU_ASSERT(rc == 0);
849 	poll_threads();
850 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
851 
852 	/* Call reset completion inside bdev module. */
853 	stub_complete_io(g_bdev.io_target, 0);
854 	poll_threads();
855 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
856 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
857 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
858 
859 
860 	/* Test case 2) no outstanding IO are present. Reset should perform one iteration over
861 	* channels and then be skipped. */
862 	bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
863 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
864 
865 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
866 	CU_ASSERT(rc == 0);
867 	poll_threads();
868 	/* Reset was never submitted to the bdev module. */
869 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
870 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
871 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
872 
873 
874 	/* Test case 3) outstanding IO finish during bdev reset procedure. Reset should start a
875 	* poller that checks for IO completions every second until reset_io_drain_timeout is
876 	* reached, but the outstanding IO finish earlier than this threshold. */
877 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
878 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
879 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
880 	CU_ASSERT(rc == 0);
881 
882 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
883 	CU_ASSERT(rc == 0);
884 	poll_threads();
885 	/* The reset just started and should not have been submitted yet. */
886 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
887 
888 	poll_threads();
889 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
890 	/* Let the poller wait for about half the time then complete outstanding IO. */
891 	for (iter = 0; iter < 2; iter++) {
892 		/* Reset is still processing and not submitted at this point. */
893 		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
894 		spdk_delay_us(1000 * 1000);
895 		poll_threads();
896 		poll_threads();
897 	}
898 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
899 	stub_complete_io(g_bdev.io_target, 0);
900 	poll_threads();
901 	spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
902 	poll_threads();
903 	poll_threads();
904 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
905 	/* Sending reset to the bdev module has been skipped. */
906 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
907 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
908 
909 
910 	/* Test case 4) outstanding IO are still present after reset_io_drain_timeout
911 	* seconds have passed. */
912 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
913 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
914 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
915 	CU_ASSERT(rc == 0);
916 
917 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
918 	CU_ASSERT(rc == 0);
919 	poll_threads();
920 	/* The reset just started and should not have been submitted yet. */
921 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
922 
923 	poll_threads();
924 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
925 	/* Let the poller wait for reset_io_drain_timeout seconds. */
926 	for (iter = 0; iter < bdev->reset_io_drain_timeout; iter++) {
927 		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
928 		spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
929 		poll_threads();
930 		poll_threads();
931 	}
932 
933 	/* After timing out, the reset should have been sent to the module. */
934 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
935 	/* Complete reset submitted to the module and the read IO. */
936 	stub_complete_io(g_bdev.io_target, 0);
937 	poll_threads();
938 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
939 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
940 
941 
942 	/* Destroy the channel and end the test. */
943 	spdk_put_io_channel(io_ch);
944 	poll_threads();
945 
946 	teardown_test();
947 }
948 
949 
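/* Basic QoS checks: I/O from any thread is funneled through the QoS thread,
 * aborts work while QoS is enabled, and the QoS channel is torn down and
 * re-created as descriptors and channels come and go.
 */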
950 static void
951 basic_qos(void)
952 {
953 	struct spdk_io_channel *io_ch[2];
954 	struct spdk_bdev_channel *bdev_ch[2];
955 	struct spdk_bdev *bdev;
956 	enum spdk_bdev_io_status status, abort_status;
957 	int rc;
958 
959 	setup_test();
960 
961 	/* Enable QoS */
962 	bdev = &g_bdev.bdev;
963 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
964 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
965 	TAILQ_INIT(&bdev->internal.qos->queued);
966 	/*
967 	 * Enable read/write IOPS, read only byte per second and
968 	 * read/write byte per second rate limits.
969 	 * In this case, all rate limits will take equal effect.
970 	 */
971 	/* 2000 read/write I/O per second, or 2 per millisecond */
972 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
973 	/* 8K read/write byte per millisecond with 4K block size */
974 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
975 	/* 8K read only byte per millisecond with 4K block size */
976 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
977 
978 	g_get_io_channel = true;
979 
980 	set_thread(0);
981 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
982 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
983 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
984 
985 	set_thread(1);
986 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
987 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
988 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
989 
990 	/*
991 	 * Send an I/O on thread 0, which is where the QoS thread is running.
992 	 */
993 	set_thread(0);
994 	status = SPDK_BDEV_IO_STATUS_PENDING;
995 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
996 	CU_ASSERT(rc == 0);
997 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
998 	poll_threads();
999 	stub_complete_io(g_bdev.io_target, 0);
1000 	poll_threads();
1001 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1002 
1003 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1004 	status = SPDK_BDEV_IO_STATUS_PENDING;
1005 	set_thread(1);
1006 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1007 	CU_ASSERT(rc == 0);
1008 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1009 	poll_threads();
1010 	/* Complete I/O on thread 0. This should not complete the I/O we submitted. */
1011 	set_thread(0);
1012 	stub_complete_io(g_bdev.io_target, 0);
1013 	poll_threads();
1014 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1015 	/* Now complete I/O on original thread 1. */
1016 	set_thread(1);
1017 	poll_threads();
1018 	stub_complete_io(g_bdev.io_target, 0);
1019 	poll_threads();
1020 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1021 
1022 	/* Reset rate limit for the next test cases. */
1023 	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
1024 	poll_threads();
1025 
1026 	/*
1027 	 * Test abort request when QoS is enabled.
1028 	 */
1029 
1030 	/* Send an I/O on thread 0, which is where the QoS thread is running. */
1031 	set_thread(0);
1032 	status = SPDK_BDEV_IO_STATUS_PENDING;
1033 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1034 	CU_ASSERT(rc == 0);
1035 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1036 	/* Send an abort to the I/O on the same thread. */
1037 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1038 	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
1039 	CU_ASSERT(rc == 0);
1040 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1041 	poll_threads();
1042 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1043 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1044 
1045 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1046 	status = SPDK_BDEV_IO_STATUS_PENDING;
1047 	set_thread(1);
1048 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1049 	CU_ASSERT(rc == 0);
1050 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1051 	poll_threads();
1052 	/* Send an abort to the I/O on the same thread. */
1053 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1054 	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
1055 	CU_ASSERT(rc == 0);
1056 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1057 	poll_threads();
1058 	/* Complete the I/O with failure and the abort with success on thread 1. */
1059 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1060 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1061 
1062 	set_thread(0);
1063 
1064 	/*
1065 	 * Close the descriptor only, which should stop the qos channel since
1066 	 * it is the last descriptor being removed.
1067 	 */
1068 	spdk_bdev_close(g_desc);
1069 	poll_threads();
1070 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1071 
1072 	/*
1073 	 * Open the bdev again, which should set up the qos channel since
1074 	 * valid channels still exist.
1075 	 */
1076 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1077 	poll_threads();
1078 	CU_ASSERT(bdev->internal.qos->ch != NULL);
1079 
1080 	/* Tear down the channels */
1081 	set_thread(0);
1082 	spdk_put_io_channel(io_ch[0]);
1083 	set_thread(1);
1084 	spdk_put_io_channel(io_ch[1]);
1085 	poll_threads();
1086 	set_thread(0);
1087 
1088 	/* Close the descriptor, which should stop the qos channel */
1089 	spdk_bdev_close(g_desc);
1090 	poll_threads();
1091 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1092 
1093 	/* Open the bdev again, no qos channel setup without valid channels. */
1094 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1095 	poll_threads();
1096 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1097 
1098 	/* Create the channels in reverse order. */
1099 	set_thread(1);
1100 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1101 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1102 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1103 
1104 	set_thread(0);
1105 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1106 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1107 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1108 
1109 	/* Confirm that the qos thread is now thread 1 */
1110 	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
1111 
1112 	/* Tear down the channels */
1113 	set_thread(0);
1114 	spdk_put_io_channel(io_ch[0]);
1115 	set_thread(1);
1116 	spdk_put_io_channel(io_ch[1]);
1117 	poll_threads();
1118 
1119 	set_thread(0);
1120 
1121 	teardown_test();
1122 }
1123 
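/* Verify that I/O exceeding the per-timeslice QoS allotment is queued and only
 * completes after time advances into the next timeslice.
 */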
1124 static void
1125 io_during_qos_queue(void)
1126 {
1127 	struct spdk_io_channel *io_ch[2];
1128 	struct spdk_bdev_channel *bdev_ch[2];
1129 	struct spdk_bdev *bdev;
1130 	enum spdk_bdev_io_status status0, status1, status2;
1131 	int rc;
1132 
1133 	setup_test();
1134 	MOCK_SET(spdk_get_ticks, 0);
1135 
1136 	/* Enable QoS */
1137 	bdev = &g_bdev.bdev;
1138 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1139 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1140 	TAILQ_INIT(&bdev->internal.qos->queued);
1141 	/*
1142 	 * Enable read/write IOPS, read only byte per sec, write only
1143 	 * byte per sec and read/write byte per sec rate limits.
1144 	 * In this case, both read only and write only byte per sec
1145 	 * rate limits will take effect.
1146 	 */
1147 	/* 4000 read/write I/O per second, or 4 per millisecond */
1148 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
1149 	/* 8K byte per millisecond with 4K block size */
1150 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
1151 	/* 4K byte per millisecond with 4K block size */
1152 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
1153 	/* 4K byte per millisecond with 4K block size */
1154 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
1155 
1156 	g_get_io_channel = true;
1157 
1158 	/* Create channels */
1159 	set_thread(0);
1160 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1161 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1162 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1163 
1164 	set_thread(1);
1165 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1166 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1167 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1168 
1169 	/* Send two read I/Os */
1170 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1171 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1172 	CU_ASSERT(rc == 0);
1173 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1174 	set_thread(0);
1175 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1176 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1177 	CU_ASSERT(rc == 0);
1178 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1179 	/* Send one write I/O */
1180 	status2 = SPDK_BDEV_IO_STATUS_PENDING;
1181 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
1182 	CU_ASSERT(rc == 0);
1183 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
1184 
1185 	/* Complete any I/O that arrived at the disk */
1186 	poll_threads();
1187 	set_thread(1);
1188 	stub_complete_io(g_bdev.io_target, 0);
1189 	set_thread(0);
1190 	stub_complete_io(g_bdev.io_target, 0);
1191 	poll_threads();
1192 
1193 	/* Only one of the two read I/Os should complete. (logical XOR) */
1194 	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
1195 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1196 	} else {
1197 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1198 	}
1199 	/* The write I/O should complete. */
1200 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
1201 
1202 	/* Advance in time by a millisecond */
1203 	spdk_delay_us(1000);
1204 
1205 	/* Complete more I/O */
1206 	poll_threads();
1207 	set_thread(1);
1208 	stub_complete_io(g_bdev.io_target, 0);
1209 	set_thread(0);
1210 	stub_complete_io(g_bdev.io_target, 0);
1211 	poll_threads();
1212 
1213 	/* Now the second read I/O should be done */
1214 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
1215 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1216 
1217 	/* Tear down the channels */
1218 	set_thread(1);
1219 	spdk_put_io_channel(io_ch[1]);
1220 	set_thread(0);
1221 	spdk_put_io_channel(io_ch[0]);
1222 	poll_threads();
1223 
1224 	teardown_test();
1225 }
1226 
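/* Verify that a reset aborts both I/O queued by QoS and I/O already sitting at
 * the disk.
 */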
1227 static void
1228 io_during_qos_reset(void)
1229 {
1230 	struct spdk_io_channel *io_ch[2];
1231 	struct spdk_bdev_channel *bdev_ch[2];
1232 	struct spdk_bdev *bdev;
1233 	enum spdk_bdev_io_status status0, status1, reset_status;
1234 	int rc;
1235 
1236 	setup_test();
1237 	MOCK_SET(spdk_get_ticks, 0);
1238 
1239 	/* Enable QoS */
1240 	bdev = &g_bdev.bdev;
1241 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1242 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1243 	TAILQ_INIT(&bdev->internal.qos->queued);
1244 	/*
1245 	 * Enable read/write IOPS, write only byte per sec and
1246 	 * read/write byte per second rate limits.
1247 	 * In this case, read/write byte per second rate limit will
1248 	 * take effect first.
1249 	 */
1250 	/* 2000 read/write I/O per second, or 2 per millisecond */
1251 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
1252 	/* 4K byte per millisecond with 4K block size */
1253 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
1254 	/* 8K byte per millisecond with 4K block size */
1255 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
1256 
1257 	g_get_io_channel = true;
1258 
1259 	/* Create channels */
1260 	set_thread(0);
1261 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1262 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1263 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1264 
1265 	set_thread(1);
1266 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1267 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1268 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1269 
1270 	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
1271 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1272 	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1273 	CU_ASSERT(rc == 0);
1274 	set_thread(0);
1275 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1276 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1277 	CU_ASSERT(rc == 0);
1278 
1279 	poll_threads();
1280 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1281 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1282 
1283 	/* Reset the bdev. */
1284 	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
1285 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
1286 	CU_ASSERT(rc == 0);
1287 
1288 	/* Complete any I/O that arrived at the disk */
1289 	poll_threads();
1290 	set_thread(1);
1291 	stub_complete_io(g_bdev.io_target, 0);
1292 	set_thread(0);
1293 	stub_complete_io(g_bdev.io_target, 0);
1294 	poll_threads();
1295 
1296 	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1297 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
1298 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
1299 
1300 	/* Tear down the channels */
1301 	set_thread(1);
1302 	spdk_put_io_channel(io_ch[1]);
1303 	set_thread(0);
1304 	spdk_put_io_channel(io_ch[0]);
1305 	poll_threads();
1306 
1307 	teardown_test();
1308 }
1309 
1310 static void
1311 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1312 {
1313 	enum spdk_bdev_io_status *status = cb_arg;
1314 
1315 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
1316 	spdk_bdev_free_io(bdev_io);
1317 }
1318 
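/* Verify NOMEM handling on a single bdev: once the stub channel is saturated,
 * additional I/O lands on shared_resource->nomem_io and is only retried after
 * enough completions bring outstanding I/O down to nomem_threshold.
 */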
1319 static void
1320 enomem(void)
1321 {
1322 	struct spdk_io_channel *io_ch;
1323 	struct spdk_bdev_channel *bdev_ch;
1324 	struct spdk_bdev_shared_resource *shared_resource;
1325 	struct ut_bdev_channel *ut_ch;
1326 	const uint32_t IO_ARRAY_SIZE = 64;
1327 	const uint32_t AVAIL = 20;
1328 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
1329 	uint32_t nomem_cnt, i;
1330 	struct spdk_bdev_io *first_io;
1331 	int rc;
1332 
1333 	setup_test();
1334 
1335 	set_thread(0);
1336 	io_ch = spdk_bdev_get_io_channel(g_desc);
1337 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1338 	shared_resource = bdev_ch->shared_resource;
1339 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1340 	ut_ch->avail_cnt = AVAIL;
1341 
1342 	/* First submit a number of IOs equal to what the channel can support. */
1343 	for (i = 0; i < AVAIL; i++) {
1344 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1345 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1346 		CU_ASSERT(rc == 0);
1347 	}
1348 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1349 
1350 	/*
1351 	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
1352 	 *  the nomem_io list.
1353 	 */
1354 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1355 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1356 	CU_ASSERT(rc == 0);
1357 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1358 	first_io = TAILQ_FIRST(&shared_resource->nomem_io);
1359 
1360 	/*
1361 	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
1362 	 *  the first_io above.
1363 	 */
1364 	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
1365 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1366 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1367 		CU_ASSERT(rc == 0);
1368 	}
1369 
1370 	/* Assert that first_io is still at the head of the list. */
1371 	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
1372 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
1373 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1374 	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
1375 
1376 	/*
1377 	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
1378 	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
1379 	 *  list.
1380 	 */
1381 	stub_complete_io(g_bdev.io_target, 1);
1382 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1383 
1384 	/*
1385 	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
1386 	 *  and we should see I/O get resubmitted to the test bdev module.
1387 	 */
1388 	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
1389 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
1390 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1391 
1392 	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
1393 	stub_complete_io(g_bdev.io_target, 1);
1394 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1395 
1396 	/*
1397 	 * Send a reset and confirm that all I/O are completed, including the ones that
1398 	 *  were queued on the nomem_io list.
1399 	 */
1400 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
1401 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
1402 	poll_threads();
1403 	CU_ASSERT(rc == 0);
1404 	/* This will complete the reset. */
1405 	stub_complete_io(g_bdev.io_target, 0);
1406 
1407 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
1408 	CU_ASSERT(shared_resource->io_outstanding == 0);
1409 
1410 	spdk_put_io_channel(io_ch);
1411 	poll_threads();
1412 	teardown_test();
1413 }
1414 
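/* Two bdevs that share an io_target share a shared_resource, so I/O queued
 * with NOMEM on one bdev is retried when the other bdev's I/O completes.
 */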
1415 static void
1416 enomem_multi_bdev(void)
1417 {
1418 	struct spdk_io_channel *io_ch;
1419 	struct spdk_bdev_channel *bdev_ch;
1420 	struct spdk_bdev_shared_resource *shared_resource;
1421 	struct ut_bdev_channel *ut_ch;
1422 	const uint32_t IO_ARRAY_SIZE = 64;
1423 	const uint32_t AVAIL = 20;
1424 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1425 	uint32_t i;
1426 	struct ut_bdev *second_bdev;
1427 	struct spdk_bdev_desc *second_desc = NULL;
1428 	struct spdk_bdev_channel *second_bdev_ch;
1429 	struct spdk_io_channel *second_ch;
1430 	int rc;
1431 
1432 	setup_test();
1433 
1434 	/* Register second bdev with the same io_target  */
1435 	second_bdev = calloc(1, sizeof(*second_bdev));
1436 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1437 	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
1438 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1439 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1440 
1441 	set_thread(0);
1442 	io_ch = spdk_bdev_get_io_channel(g_desc);
1443 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1444 	shared_resource = bdev_ch->shared_resource;
1445 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1446 	ut_ch->avail_cnt = AVAIL;
1447 
1448 	second_ch = spdk_bdev_get_io_channel(second_desc);
1449 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1450 	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
1451 
1452 	/* Saturate io_target through bdev A. */
1453 	for (i = 0; i < AVAIL; i++) {
1454 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1455 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1456 		CU_ASSERT(rc == 0);
1457 	}
1458 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1459 
1460 	/*
1461 	 * Now submit I/O through the second bdev. This should fail with ENOMEM
1462 	 * and then go onto the nomem_io list.
1463 	 */
1464 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1465 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1466 	CU_ASSERT(rc == 0);
1467 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1468 
1469 	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
1470 	stub_complete_io(g_bdev.io_target, AVAIL);
1471 
1472 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1473 	CU_ASSERT(shared_resource->io_outstanding == 1);
1474 
1475 	/* Now complete our retried I/O  */
1476 	stub_complete_io(g_bdev.io_target, 1);
1477 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1478 
1479 	spdk_put_io_channel(io_ch);
1480 	spdk_put_io_channel(second_ch);
1481 	spdk_bdev_close(second_desc);
1482 	unregister_bdev(second_bdev);
1483 	poll_threads();
1484 	free(second_bdev);
1485 	teardown_test();
1486 }
1487 
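/* Verify that unregistering a bdev fails the I/O still waiting on its
 * nomem_io list.
 */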
1488 static void
1489 enomem_multi_bdev_unregister(void)
1490 {
1491 	struct spdk_io_channel *io_ch;
1492 	struct spdk_bdev_channel *bdev_ch;
1493 	struct spdk_bdev_shared_resource *shared_resource;
1494 	struct ut_bdev_channel *ut_ch;
1495 	const uint32_t IO_ARRAY_SIZE = 64;
1496 	const uint32_t AVAIL = 20;
1497 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1498 	uint32_t i;
1499 	int rc;
1500 
1501 	setup_test();
1502 
1503 	set_thread(0);
1504 	io_ch = spdk_bdev_get_io_channel(g_desc);
1505 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1506 	shared_resource = bdev_ch->shared_resource;
1507 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1508 	ut_ch->avail_cnt = AVAIL;
1509 
1510 	/* Saturate io_target through the bdev. */
1511 	for (i = 0; i < AVAIL; i++) {
1512 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1513 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1514 		CU_ASSERT(rc == 0);
1515 	}
1516 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1517 
1518 	/*
1519 	 * Now submit I/O through the bdev. This should fail with ENOMEM
1520 	 * and then go onto the nomem_io list.
1521 	 */
1522 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1523 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1524 	CU_ASSERT(rc == 0);
1525 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1526 
1527 	/* Unregister the bdev to abort the IOs from nomem_io queue. */
1528 	unregister_bdev(&g_bdev);
1529 	CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED);
1530 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1531 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL);
1532 
1533 	/* Complete the bdev's I/O. */
1534 	stub_complete_io(g_bdev.io_target, AVAIL);
1535 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1536 
1537 	spdk_put_io_channel(io_ch);
1538 	poll_threads();
1539 	teardown_test();
1540 }
1541 
1542 static void
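/* Bdevs with different io_targets get separate shared_resources, so saturating
 * one bdev does not cause NOMEM queueing for the other.
 */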
1543 enomem_multi_io_target(void)
1544 {
1545 	struct spdk_io_channel *io_ch;
1546 	struct spdk_bdev_channel *bdev_ch;
1547 	struct ut_bdev_channel *ut_ch;
1548 	const uint32_t IO_ARRAY_SIZE = 64;
1549 	const uint32_t AVAIL = 20;
1550 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1551 	uint32_t i;
1552 	int new_io_device;
1553 	struct ut_bdev *second_bdev;
1554 	struct spdk_bdev_desc *second_desc = NULL;
1555 	struct spdk_bdev_channel *second_bdev_ch;
1556 	struct spdk_io_channel *second_ch;
1557 	int rc;
1558 
1559 	setup_test();
1560 
1561 	/* Create new io_target and a second bdev using it */
1562 	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1563 				sizeof(struct ut_bdev_channel), NULL);
1564 	second_bdev = calloc(1, sizeof(*second_bdev));
1565 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1566 	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
1567 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1568 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1569 
1570 	set_thread(0);
1571 	io_ch = spdk_bdev_get_io_channel(g_desc);
1572 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1573 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1574 	ut_ch->avail_cnt = AVAIL;
1575 
1576 	/* Different io_target should imply a different shared_resource */
1577 	second_ch = spdk_bdev_get_io_channel(second_desc);
1578 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1579 	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1580 
1581 	/* Saturate io_target through bdev A. */
1582 	for (i = 0; i < AVAIL; i++) {
1583 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1584 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1585 		CU_ASSERT(rc == 0);
1586 	}
1587 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1588 
1589 	/* Issue one more I/O to fill ENOMEM list. */
1590 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1591 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1592 	CU_ASSERT(rc == 0);
1593 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1594 
1595 	/*
1596 	 * Now submit I/O through the second bdev. This should go through and complete
1597 	 * successfully because we're using a different io_device underneath.
1598 	 */
1599 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1600 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1601 	CU_ASSERT(rc == 0);
1602 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1603 	stub_complete_io(second_bdev->io_target, 1);
1604 
1605 	/* Cleanup; Complete outstanding I/O. */
1606 	stub_complete_io(g_bdev.io_target, AVAIL);
1607 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1608 	/* Complete the ENOMEM I/O */
1609 	stub_complete_io(g_bdev.io_target, 1);
1610 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1611 
1612 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1613 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1614 	spdk_put_io_channel(io_ch);
1615 	spdk_put_io_channel(second_ch);
1616 	spdk_bdev_close(second_desc);
1617 	unregister_bdev(second_bdev);
1618 	spdk_io_device_unregister(&new_io_device, NULL);
1619 	poll_threads();
1620 	free(second_bdev);
1621 	teardown_test();
1622 }
1623 
1624 static void
1625 qos_dynamic_enable_done(void *cb_arg, int status)
1626 {
1627 	int *rc = cb_arg;
1628 	*rc = status;
1629 }
1630 
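/* Verify that QoS rate limits can be enabled and disabled at runtime, and that
 * I/O queued by QoS is resubmitted on its original thread (not aborted) when
 * QoS is disabled.
 */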
1631 static void
1632 qos_dynamic_enable(void)
1633 {
1634 	struct spdk_io_channel *io_ch[2];
1635 	struct spdk_bdev_channel *bdev_ch[2];
1636 	struct spdk_bdev *bdev;
1637 	enum spdk_bdev_io_status bdev_io_status[2];
1638 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
1639 	int status, second_status, rc, i;
1640 
1641 	setup_test();
1642 	MOCK_SET(spdk_get_ticks, 0);
1643 
1644 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1645 		limits[i] = UINT64_MAX;
1646 	}
1647 
1648 	bdev = &g_bdev.bdev;
1649 
1650 	g_get_io_channel = true;
1651 
1652 	/* Create channels */
1653 	set_thread(0);
1654 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1655 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1656 	CU_ASSERT(bdev_ch[0]->flags == 0);
1657 
1658 	set_thread(1);
1659 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1660 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1661 	CU_ASSERT(bdev_ch[1]->flags == 0);
1662 
1663 	set_thread(0);
1664 
1665 	/*
1666 	 * Enable QoS: Read/Write IOPS, Read/Write byte,
1667 	 * Read only byte and Write only byte per second
1668 	 * rate limits.
1669 	 * More than 10 I/Os allowed per timeslice.
1670 	 */
1671 	status = -1;
1672 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1673 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
1674 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
1675 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
1676 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1677 	poll_threads();
1678 	CU_ASSERT(status == 0);
1679 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1680 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1681 
1682 	/*
1683 	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
1684 	 * Additional I/O will then be queued.
1685 	 */
1686 	set_thread(0);
1687 	for (i = 0; i < 10; i++) {
1688 		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1689 		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1690 		CU_ASSERT(rc == 0);
1691 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1692 		poll_thread(0);
1693 		stub_complete_io(g_bdev.io_target, 0);
1694 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1695 	}
1696 
1697 	/*
1698 	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
1699 	 * filled already.  We want to test that, when QoS is disabled, these two I/O:
1700 	 *  1) are not aborted
1701 	 *  2) are sent back to their original thread for resubmission
1702 	 */
1703 	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1704 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1705 	CU_ASSERT(rc == 0);
1706 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1707 	set_thread(1);
1708 	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
1709 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
1710 	CU_ASSERT(rc == 0);
1711 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1712 	poll_threads();
1713 
1714 	/*
1715 	 * Disable QoS: Read/Write IOPS, Read/Write byte,
1716 	 * Read only byte rate limits
1717 	 */
1718 	status = -1;
1719 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1720 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
1721 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
1722 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1723 	poll_threads();
1724 	CU_ASSERT(status == 0);
1725 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1726 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1727 
1728 	/* Disable QoS: Write only byte per second rate limit */
1729 	status = -1;
1730 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
1731 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1732 	poll_threads();
1733 	CU_ASSERT(status == 0);
1734 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1735 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1736 
1737 	/*
1738 	 * All I/O should have been resubmitted back to their original thread.  Complete
1739 	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
1740 	 */
1741 	set_thread(0);
1742 	stub_complete_io(g_bdev.io_target, 0);
1743 	poll_threads();
1744 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1745 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1746 
1747 	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
1748 	set_thread(1);
1749 	stub_complete_io(g_bdev.io_target, 0);
1750 	poll_threads();
1751 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
1752 
1753 	/* Disable QoS again */
1754 	status = -1;
1755 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1756 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1757 	poll_threads();
1758 	CU_ASSERT(status == 0); /* This should succeed */
1759 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1760 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1761 
1762 	/* Enable QoS on thread 0 */
1763 	status = -1;
1764 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1765 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1766 	poll_threads();
1767 	CU_ASSERT(status == 0);
1768 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1769 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1770 
1771 	/* Disable QoS on thread 1 */
1772 	set_thread(1);
1773 	status = -1;
1774 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1775 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1776 	/* Don't poll yet. This should leave the channels with QoS enabled */
1777 	CU_ASSERT(status == -1);
1778 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1779 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1780 
1781 	/* Enable QoS. This should fail immediately because the previous QoS disable hasn't completed yet. */
1782 	second_status = 0;
1783 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
1784 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
1785 	poll_threads();
1786 	CU_ASSERT(status == 0); /* The disable should succeed */
1787 	CU_ASSERT(second_status < 0); /* The enable should fail */
1788 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1789 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1790 
1791 	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
1792 	status = -1;
1793 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1794 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1795 	poll_threads();
1796 	CU_ASSERT(status == 0);
1797 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1798 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1799 
1800 	/* Tear down the channels */
1801 	set_thread(0);
1802 	spdk_put_io_channel(io_ch[0]);
1803 	set_thread(1);
1804 	spdk_put_io_channel(io_ch[1]);
1805 	poll_threads();
1806 
1807 	set_thread(0);
1808 	teardown_test();
1809 }
1810 
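/* Completion callback for spdk_bdev_histogram_enable()/disable; stores the status in g_status. */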
1811 static void
1812 histogram_status_cb(void *cb_arg, int status)
1813 {
1814 	g_status = status;
1815 }
1816 
1817 static void
1818 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1819 {
1820 	g_status = status;
1821 	g_histogram = histogram;
1822 }
1823 
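/* spdk_histogram_data_iterate() callback: accumulate the per-bucket counts into g_count. */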
1824 static void
1825 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1826 		   uint64_t total, uint64_t so_far)
1827 {
1828 	g_count += count;
1829 }
1830 
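/* Verify that the bdev latency histogram aggregates data from I/O submitted on channels
 * owned by different threads.
 */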
1831 static void
1832 bdev_histograms_mt(void)
1833 {
1834 	struct spdk_io_channel *ch[2];
1835 	struct spdk_histogram_data *histogram;
1836 	uint8_t buf[4096];
1837 	enum spdk_bdev_io_status status = SPDK_BDEV_IO_STATUS_PENDING;
1838 	int rc;
1839 
1840 
1841 	setup_test();
1842 
1843 	set_thread(0);
1844 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1845 	CU_ASSERT(ch[0] != NULL);
1846 
1847 	set_thread(1);
1848 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1849 	CU_ASSERT(ch[1] != NULL);
1850 
1851 
1852 	/* Enable histogram */
1853 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
1854 	poll_threads();
1855 	CU_ASSERT(g_status == 0);
1856 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1857 
1858 	/* Allocate histogram */
1859 	histogram = spdk_histogram_data_alloc();
1860 
1861 	/* Check if histogram is zeroed */
1862 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1863 	poll_threads();
1864 	CU_ASSERT(g_status == 0);
1865 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1866 
1867 	g_count = 0;
1868 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1869 
1870 	CU_ASSERT(g_count == 0);
1871 
1872 	set_thread(0);
1873 	rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
1874 	CU_ASSERT(rc == 0);
1875 
1876 	spdk_delay_us(10);
1877 	stub_complete_io(g_bdev.io_target, 1);
1878 	poll_threads();
1879 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1880 
1881 
1882 	set_thread(1);
1883 	rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
1884 	CU_ASSERT(rc == 0);
1885 
1886 	spdk_delay_us(10);
1887 	stub_complete_io(g_bdev.io_target, 1);
1888 	poll_threads();
1889 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1890 
1891 	set_thread(0);
1892 
1893 	/* Check if histogram gathered data from all I/O channels */
1894 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1895 	poll_threads();
1896 	CU_ASSERT(g_status == 0);
1897 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1898 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1899 
1900 	g_count = 0;
1901 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1902 	CU_ASSERT(g_count == 2);
1903 
1904 	/* Disable histogram */
1905 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1906 	poll_threads();
1907 	CU_ASSERT(g_status == 0);
1908 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1909 
1910 	spdk_histogram_data_free(histogram);
1911 
1912 	/* Tear down the channels */
1913 	set_thread(0);
1914 	spdk_put_io_channel(ch[0]);
1915 	set_thread(1);
1916 	spdk_put_io_channel(ch[1]);
1917 	poll_threads();
1918 	set_thread(0);
1919 	teardown_test();
1920 
1921 }
1922 
1923 struct timeout_io_cb_arg {
1924 	struct iovec iov;
1925 	uint8_t type;
1926 };
1927 
1928 static int
1929 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
1930 {
1931 	struct spdk_bdev_io *bdev_io;
1932 	int n = 0;
1933 
1934 	if (!ch) {
1935 		return -1;
1936 	}
1937 
1938 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
1939 		n++;
1940 	}
1941 
1942 	return n;
1943 }
1944 
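/* Timeout callback registered via spdk_bdev_set_timeout(); it records the type and iovec of
 * the timed-out I/O so the test can verify exactly which I/O hit the timeout.
 */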
1945 static void
1946 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
1947 {
1948 	struct timeout_io_cb_arg *ctx = cb_arg;
1949 
1950 	ctx->type = bdev_io->type;
1951 	ctx->iov.iov_base = bdev_io->iov.iov_base;
1952 	ctx->iov.iov_len = bdev_io->iov.iov_len;
1953 }
1954 
1955 static bool g_io_done;
1956 
1957 static void
1958 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1959 {
1960 	g_io_done = true;
1961 	spdk_bdev_free_io(bdev_io);
1962 }
1963 
1964 static void
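/* Exercise the I/O timeout poller with channels on three threads, including closing the desc
 * and putting io channels while the timeout poller is still running.
 */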
1965 bdev_set_io_timeout_mt(void)
1966 {
1967 	struct spdk_io_channel *ch[3];
1968 	struct spdk_bdev_channel *bdev_ch[3];
1969 	struct timeout_io_cb_arg cb_arg;
1970 
1971 	setup_test();
1972 
1973 	g_bdev.bdev.optimal_io_boundary = 16;
1974 	g_bdev.bdev.split_on_optimal_io_boundary = true;
1975 
1976 	set_thread(0);
1977 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1978 	CU_ASSERT(ch[0] != NULL);
1979 
1980 	set_thread(1);
1981 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1982 	CU_ASSERT(ch[1] != NULL);
1983 
1984 	set_thread(2);
1985 	ch[2] = spdk_bdev_get_io_channel(g_desc);
1986 	CU_ASSERT(ch[2] != NULL);
1987 
1988 	/* Multi-thread mode
1989 	 * 1) Check that the poller was registered successfully.
1990 	 * 2) Check the timed-out IO and ensure it was the IO submitted by the user.
1991 	 * 3) Check that the link in the bdev_ch works correctly.
1992 	 * 4) Close the desc and put the io channel while the timeout poller is polling.
1993 	 */
1994 
1995 	/* In desc thread set the timeout */
1996 	set_thread(0);
1997 	CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
1998 	CU_ASSERT(g_desc->io_timeout_poller != NULL);
1999 	CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
2000 	CU_ASSERT(g_desc->cb_arg == &cb_arg);
2001 
2002 	/* check the IO submitted list and timeout handler */
2003 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
2004 	bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
2005 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
2006 
2007 	set_thread(1);
2008 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2009 	bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
2010 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
2011 
2012 	/* Now submit a single-vector command that gets split on the optimal I/O boundary.
2013 	 * Offset 14, length 8, payload 0xF000
2014 	 *  Child - Offset 14, length 2, payload 0xF000
2015 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
2016 	 *
2017 	 * Together with the parent I/O, three entries end up on the channel's io_submitted list.
2018 	 */
2019 	set_thread(2);
2020 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
2021 	bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
2022 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
2023 
2024 	set_thread(0);
2025 	memset(&cb_arg, 0, sizeof(cb_arg));
2026 	spdk_delay_us(3 * spdk_get_ticks_hz());
2027 	poll_threads();
2028 	CU_ASSERT(cb_arg.type == 0);
2029 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2030 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2031 
2032 	/* Now the elapsed time reaches the timeout limit */
2033 	spdk_delay_us(3 * spdk_get_ticks_hz());
2034 	poll_thread(0);
2035 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2036 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2037 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2038 	stub_complete_io(g_bdev.io_target, 1);
2039 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
2040 
2041 	memset(&cb_arg, 0, sizeof(cb_arg));
2042 	set_thread(1);
2043 	poll_thread(1);
2044 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2045 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
2046 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2047 	stub_complete_io(g_bdev.io_target, 1);
2048 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
2049 
2050 	memset(&cb_arg, 0, sizeof(cb_arg));
2051 	set_thread(2);
2052 	poll_thread(2);
2053 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2054 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
2055 	CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
2056 	stub_complete_io(g_bdev.io_target, 1);
2057 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
2058 	stub_complete_io(g_bdev.io_target, 1);
2059 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
2060 
2061 	/* Run poll_timeout_done(), which completes this pass of the timeout poller. */
2062 	set_thread(0);
2063 	poll_thread(0);
2064 	CU_ASSERT(g_desc->refs == 0);
2065 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2066 	set_thread(1);
2067 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
2068 	set_thread(2);
2069 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
2070 
2071 	/* Trigger the timeout poller to run again; desc->refs is incremented.
2072 	 * On thread 0 we destroy the io channel before the timeout poller runs,
2073 	 * so the timeout callback is not called on thread 0.
2074 	 */
2075 	spdk_delay_us(6 * spdk_get_ticks_hz());
2076 	memset(&cb_arg, 0, sizeof(cb_arg));
2077 	set_thread(0);
2078 	stub_complete_io(g_bdev.io_target, 1);
2079 	spdk_put_io_channel(ch[0]);
2080 	poll_thread(0);
2081 	CU_ASSERT(g_desc->refs == 1);
2082 	CU_ASSERT(cb_arg.type == 0);
2083 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2084 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2085 
2086 	/* On thread 1 the timeout poller runs first, then we destroy the io channel.
2087 	 * The timeout callback is called on thread 1.
2088 	 */
2089 	memset(&cb_arg, 0, sizeof(cb_arg));
2090 	set_thread(1);
2091 	poll_thread(1);
2092 	stub_complete_io(g_bdev.io_target, 1);
2093 	spdk_put_io_channel(ch[1]);
2094 	poll_thread(1);
2095 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2096 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2097 	CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
2098 
2099 	/* Close the desc.
2100 	 * This unregisters the timeout poller first and then decrements desc->refs,
2101 	 * but refs is not yet zero, so the desc is not freed.
2102 	 */
2103 	set_thread(0);
2104 	spdk_bdev_close(g_desc);
2105 	CU_ASSERT(g_desc->refs == 1);
2106 	CU_ASSERT(g_desc->io_timeout_poller == NULL);
2107 
2108 	/* The timeout poller runs on thread 2, then we destroy the io channel.
2109 	 * The desc is already closed, so the timeout poller exits immediately and
2110 	 * the timeout callback is not called on thread 2.
2111 	 */
2112 	memset(&cb_arg, 0, sizeof(cb_arg));
2113 	set_thread(2);
2114 	poll_thread(2);
2115 	stub_complete_io(g_bdev.io_target, 1);
2116 	spdk_put_io_channel(ch[2]);
2117 	poll_thread(2);
2118 	CU_ASSERT(cb_arg.type == 0);
2119 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2120 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2121 
2122 	set_thread(0);
2123 	poll_thread(0);
2124 	g_teardown_done = false;
2125 	unregister_bdev(&g_bdev);
2126 	spdk_io_device_unregister(&g_io_device, NULL);
2127 	spdk_bdev_finish(finish_cb, NULL);
2128 	spdk_iobuf_finish(finish_cb, NULL);
2129 	poll_threads();
2130 	memset(&g_bdev, 0, sizeof(g_bdev));
2131 	CU_ASSERT(g_teardown_done == true);
2132 	g_teardown_done = false;
2133 	free_threads();
2134 	free_cores();
2135 }
2136 
2137 static bool g_io_done2;
2138 static bool g_lock_lba_range_done;
2139 static bool g_unlock_lba_range_done;
2140 
2141 static void
2142 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2143 {
2144 	g_io_done2 = true;
2145 	spdk_bdev_free_io(bdev_io);
2146 }
2147 
2148 static void
2149 lock_lba_range_done(void *ctx, int status)
2150 {
2151 	g_lock_lba_range_done = true;
2152 }
2153 
2154 static void
2155 unlock_lba_range_done(void *ctx, int status)
2156 {
2157 	g_unlock_lba_range_done = true;
2158 }
2159 
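/* Return the number of I/O currently outstanding on the stub io_target's channel for the
 * calling thread.
 */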
2160 static uint32_t
2161 stub_channel_outstanding_cnt(void *io_target)
2162 {
2163 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
2164 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
2165 	uint32_t outstanding_cnt;
2166 
2167 	outstanding_cnt = ch->outstanding_cnt;
2168 
2169 	spdk_put_io_channel(_ch);
2170 	return outstanding_cnt;
2171 }
2172 
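/* Verify LBA range locking: the channel owning the lock may keep submitting I/O to the locked
 * range, while writes from other channels are held on their io_locked queues until the range
 * is unlocked.
 */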
2173 static void
2174 lock_lba_range_then_submit_io(void)
2175 {
2176 	struct spdk_bdev_desc *desc = NULL;
2177 	void *io_target;
2178 	struct spdk_io_channel *io_ch[3];
2179 	struct spdk_bdev_channel *bdev_ch[3];
2180 	struct lba_range *range;
2181 	char buf[4096];
2182 	int ctx0, ctx1, ctx2;
2183 	int rc;
2184 
2185 	setup_test();
2186 
2187 	io_target = g_bdev.io_target;
2188 	desc = g_desc;
2189 
2190 	set_thread(0);
2191 	io_ch[0] = spdk_bdev_get_io_channel(desc);
2192 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
2193 	CU_ASSERT(io_ch[0] != NULL);
2194 
2195 	set_thread(1);
2196 	io_ch[1] = spdk_bdev_get_io_channel(desc);
2197 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
2198 	CU_ASSERT(io_ch[1] != NULL);
2199 
2200 	set_thread(0);
2201 	g_lock_lba_range_done = false;
2202 	rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
2203 	CU_ASSERT(rc == 0);
2204 	poll_threads();
2205 
2206 	/* The lock should immediately become valid, since there are no outstanding
2207 	 * write I/O.
2208 	 */
2209 	CU_ASSERT(g_lock_lba_range_done == true);
2210 	range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
2211 	SPDK_CU_ASSERT_FATAL(range != NULL);
2212 	CU_ASSERT(range->offset == 20);
2213 	CU_ASSERT(range->length == 10);
2214 	CU_ASSERT(range->owner_ch == bdev_ch[0]);
2215 
2216 	g_io_done = false;
2217 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2218 	rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2219 	CU_ASSERT(rc == 0);
2220 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2221 
2222 	stub_complete_io(io_target, 1);
2223 	poll_threads();
2224 	CU_ASSERT(g_io_done == true);
2225 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2226 
2227 	/* Try a write I/O.  This should actually be allowed to execute, since the channel
2228 	 * holding the lock is submitting the write I/O.
2229 	 */
2230 	g_io_done = false;
2231 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2232 	rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2233 	CU_ASSERT(rc == 0);
2234 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2235 
2236 	stub_complete_io(io_target, 1);
2237 	poll_threads();
2238 	CU_ASSERT(g_io_done == true);
2239 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2240 
2241 	/* Try a write I/O.  This should get queued in the io_locked tailq. */
2242 	set_thread(1);
2243 	g_io_done = false;
2244 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2245 	rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
2246 	CU_ASSERT(rc == 0);
2247 	poll_threads();
2248 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2249 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2250 	CU_ASSERT(g_io_done == false);
2251 
2252 	/* Try to unlock the lba range using thread 1's io_ch.  This should fail. */
2253 	rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
2254 	CU_ASSERT(rc == -EINVAL);
2255 
2256 	/* Now create a new channel and submit a write I/O with it.  This should also be queued.
2257 	 * The new channel should inherit the active locks from the bdev's internal list.
2258 	 */
2259 	set_thread(2);
2260 	io_ch[2] = spdk_bdev_get_io_channel(desc);
2261 	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
2262 	CU_ASSERT(io_ch[2] != NULL);
2263 
2264 	g_io_done2 = false;
2265 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2266 	rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
2267 	CU_ASSERT(rc == 0);
2268 	poll_threads();
2269 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2270 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2271 	CU_ASSERT(g_io_done2 == false);
2272 
2273 	set_thread(0);
2274 	rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
2275 	CU_ASSERT(rc == 0);
2276 	poll_threads();
2277 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
2278 
2279 	/* The LBA range is unlocked, so the write IOs should now have started execution. */
2280 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2281 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2282 
2283 	set_thread(1);
2284 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2285 	stub_complete_io(io_target, 1);
2286 	set_thread(2);
2287 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2288 	stub_complete_io(io_target, 1);
2289 
2290 	poll_threads();
2291 	CU_ASSERT(g_io_done == true);
2292 	CU_ASSERT(g_io_done2 == true);
2293 
2294 	/* Tear down the channels */
2295 	set_thread(0);
2296 	spdk_put_io_channel(io_ch[0]);
2297 	set_thread(1);
2298 	spdk_put_io_channel(io_ch[1]);
2299 	set_thread(2);
2300 	spdk_put_io_channel(io_ch[2]);
2301 	poll_threads();
2302 	set_thread(0);
2303 	teardown_test();
2304 }
2305 
2306 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel().
2307  * spdk_bdev_unregister() calls spdk_io_device_unregister() at the end. However,
2308  * spdk_io_device_unregister() fails if it is called while spdk_for_each_channel() is executing.
2309  * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset()
2310  * completes. Test this behavior.
2311  */
2312 static void
2313 unregister_during_reset(void)
2314 {
2315 	struct spdk_io_channel *io_ch[2];
2316 	bool done_reset = false, done_unregister = false;
2317 	int rc;
2318 
2319 	setup_test();
2320 	set_thread(0);
2321 
2322 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
2323 	SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL);
2324 
2325 	set_thread(1);
2326 
2327 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
2328 	SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL);
2329 
2330 	set_thread(0);
2331 
2332 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2333 
2334 	rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset);
2335 	CU_ASSERT(rc == 0);
2336 
2337 	set_thread(0);
2338 
2339 	poll_thread_times(0, 1);
2340 
2341 	spdk_bdev_close(g_desc);
2342 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister);
2343 
2344 	CU_ASSERT(done_reset == false);
2345 	CU_ASSERT(done_unregister == false);
2346 
2347 	poll_threads();
2348 
2349 	stub_complete_io(g_bdev.io_target, 0);
2350 
2351 	poll_threads();
2352 
2353 	CU_ASSERT(done_reset == true);
2354 	CU_ASSERT(done_unregister == false);
2355 
2356 	spdk_put_io_channel(io_ch[0]);
2357 
2358 	set_thread(1);
2359 
2360 	spdk_put_io_channel(io_ch[1]);
2361 
2362 	poll_threads();
2363 
2364 	CU_ASSERT(done_unregister == true);
2365 
2366 	/* Restore the original g_bdev so that we can use teardown_test(). */
2367 	set_thread(0);
2368 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
2369 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2370 	teardown_test();
2371 }
2372 
2373 static void
2374 bdev_init_wt_cb(void *done, int rc)
2375 {
2376 }
2377 
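/* Suite setup for the "bdev_wrong_thread" tests: initialize the bdev layer on thread 0 (the
 * app thread) and then switch to thread 1 so each test starts on a non-app thread.
 */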
2378 static int
2379 wrong_thread_setup(void)
2380 {
2381 	allocate_cores(1);
2382 	allocate_threads(2);
2383 	set_thread(0);
2384 
2385 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
2386 				ut_accel_ch_destroy_cb, 0, NULL);
2387 	spdk_bdev_initialize(bdev_init_wt_cb, NULL);
2388 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
2389 				sizeof(struct ut_bdev_channel), NULL);
2390 
2391 	set_thread(1);
2392 
2393 	return 0;
2394 }
2395 
2396 static int
2397 wrong_thread_teardown(void)
2398 {
2399 	int rc = 0;
2400 
2401 	set_thread(0);
2402 
2403 	g_teardown_done = false;
2404 	spdk_io_device_unregister(&g_io_device, NULL);
2405 	spdk_bdev_finish(finish_cb, NULL);
2406 	poll_threads();
2407 	memset(&g_bdev, 0, sizeof(g_bdev));
2408 	if (!g_teardown_done) {
2409 		fprintf(stderr, "%s:%d %s: teardown not done\n", __FILE__, __LINE__, __func__);
2410 		rc = -1;
2411 	}
2412 	g_teardown_done = false;
2413 
2414 	spdk_io_device_unregister(&g_accel_io_device, NULL);
2415 	free_threads();
2416 	free_cores();
2417 
2418 	return rc;
2419 }
2420 
2421 static void
2422 _bdev_unregistered_wt(void *ctx, int rc)
2423 {
2424 	struct spdk_thread **threadp = ctx;
2425 
2426 	*threadp = spdk_get_thread();
2427 }
2428 
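/* Verify that spdk_bdev_register() is rejected on a non-app thread, while unregistering, by
 * pointer or by name, is allowed from any thread and completes its callback on that thread.
 */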
2429 static void
2430 spdk_bdev_register_wt(void)
2431 {
2432 	struct spdk_bdev bdev = { 0 };
2433 	int rc;
2434 	struct spdk_thread *unreg_thread;
2435 
2436 	bdev.name = "wt_bdev";
2437 	bdev.fn_table = &fn_table;
2438 	bdev.module = &bdev_ut_if;
2439 	bdev.blocklen = 4096;
2440 	bdev.blockcnt = 1024;
2441 
2442 	/* Can register only on app thread */
2443 	rc = spdk_bdev_register(&bdev);
2444 	CU_ASSERT(rc == -EINVAL);
2445 
2446 	/* Can unregister on any thread */
2447 	set_thread(0);
2448 	rc = spdk_bdev_register(&bdev);
2449 	CU_ASSERT(rc == 0);
2450 	set_thread(1);
2451 	unreg_thread = NULL;
2452 	spdk_bdev_unregister(&bdev, _bdev_unregistered_wt, &unreg_thread);
2453 	poll_threads();
2454 	CU_ASSERT(unreg_thread == spdk_get_thread());
2455 
2456 	/* Can unregister by name on any thread */
2457 	set_thread(0);
2458 	rc = spdk_bdev_register(&bdev);
2459 	CU_ASSERT(rc == 0);
2460 	set_thread(1);
2461 	unreg_thread = NULL;
2462 	rc = spdk_bdev_unregister_by_name(bdev.name, bdev.module, _bdev_unregistered_wt,
2463 					  &unreg_thread);
2464 	CU_ASSERT(rc == 0);
2465 	poll_threads();
2466 	CU_ASSERT(unreg_thread == spdk_get_thread());
2467 }
2468 
2469 static void
2470 wait_for_examine_cb(void *arg)
2471 {
2472 	struct spdk_thread **thread = arg;
2473 
2474 	*thread = spdk_get_thread();
2475 }
2476 
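/* Verify that spdk_bdev_examine() is rejected on a non-app thread and that
 * spdk_bdev_wait_for_examine() invokes its callback on the thread that requested it.
 */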
2477 static void
2478 spdk_bdev_examine_wt(void)
2479 {
2480 	int rc;
2481 	bool save_auto_examine = g_bdev_opts.bdev_auto_examine;
2482 	struct spdk_thread *thread;
2483 
2484 	g_bdev_opts.bdev_auto_examine = false;
2485 
2486 	set_thread(0);
2487 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2488 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2489 	set_thread(1);
2490 
2491 	/* Can examine only on the app thread */
2492 	rc = spdk_bdev_examine("ut_bdev_wt");
2493 	CU_ASSERT(rc == -EINVAL);
2494 	unregister_bdev(&g_bdev);
2495 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2496 
2497 	/* Can wait for examine on app thread, callback called on app thread. */
2498 	set_thread(0);
2499 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2500 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2501 	thread = NULL;
2502 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2503 	CU_ASSERT(rc == 0);
2504 	poll_threads();
2505 	CU_ASSERT(thread == spdk_get_thread());
2506 	unregister_bdev(&g_bdev);
2507 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2508 
2509 	/* Can wait for examine on non-app thread, callback called on same thread. */
2510 	set_thread(0);
2511 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2512 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2513 	thread = NULL;
2514 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2515 	CU_ASSERT(rc == 0);
2516 	poll_threads();
2517 	CU_ASSERT(thread == spdk_get_thread());
2518 	unregister_bdev(&g_bdev);
2519 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2520 
2521 	unregister_bdev(&g_bdev);
2522 	g_bdev_opts.bdev_auto_examine = save_auto_examine;
2523 }
2524 
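/* Verify event notification reference counting on a desc that was opened on a different
 * thread, including closing the desc while event_notify messages are still in flight.
 */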
2525 static void
2526 event_notify_and_close(void)
2527 {
2528 	int resize_notify_count = 0;
2529 	struct spdk_bdev_desc *desc = NULL;
2530 	struct spdk_bdev *bdev;
2531 	int rc;
2532 
2533 	setup_test();
2534 	set_thread(0);
2535 
2536 	/* setup_test() automatically opens the bdev, but this test needs to do
2537 	 * that in a different way. */
2538 	spdk_bdev_close(g_desc);
2539 	poll_threads();
2540 
2541 	set_thread(1);
2542 
2543 	rc = spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &resize_notify_count, &desc);
2544 	CU_ASSERT(rc == 0);
2545 	SPDK_CU_ASSERT_FATAL(desc != NULL);
2546 
2547 	bdev = spdk_bdev_desc_get_bdev(desc);
2548 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2549 
2550 	/* Test a normal case that a resize event is notified. */
2551 	set_thread(0);
2552 
2553 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 2);
2554 	CU_ASSERT(rc == 0);
2555 	CU_ASSERT(bdev->blockcnt == 1024 * 2);
2556 	CU_ASSERT(desc->refs == 1);
2557 	CU_ASSERT(resize_notify_count == 0);
2558 
2559 	poll_threads();
2560 
2561 	CU_ASSERT(desc->refs == 0);
2562 	CU_ASSERT(resize_notify_count == 1);
2563 
2564 	/* Test a more complex case: if the bdev is closed after two event_notify messages are sent,
2565 	 * both event_notify messages are discarded and the desc is freed.
2566 	 */
2567 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 3);
2568 	CU_ASSERT(rc == 0);
2569 	CU_ASSERT(bdev->blockcnt == 1024 * 3);
2570 	CU_ASSERT(desc->refs == 1);
2571 	CU_ASSERT(resize_notify_count == 1);
2572 
2573 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 4);
2574 	CU_ASSERT(rc == 0);
2575 	CU_ASSERT(bdev->blockcnt == 1024 * 4);
2576 	CU_ASSERT(desc->refs == 2);
2577 	CU_ASSERT(resize_notify_count == 1);
2578 
2579 	set_thread(1);
2580 
2581 	spdk_bdev_close(desc);
2582 	CU_ASSERT(desc->closed == true);
2583 	CU_ASSERT(desc->refs == 2);
2584 	CU_ASSERT(resize_notify_count == 1);
2585 
2586 	poll_threads();
2587 
2588 	CU_ASSERT(resize_notify_count == 1);
2589 
2590 	set_thread(0);
2591 
2592 	/* Restore g_desc. Then, we can execute teardown_test(). */
2593 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2594 	teardown_test();
2595 }
2596 
2597 int
2598 main(int argc, char **argv)
2599 {
2600 	CU_pSuite	suite = NULL;
2601 	CU_pSuite	suite_wt = NULL;
2602 	unsigned int	num_failures;
2603 
2604 	CU_set_error_action(CUEA_ABORT);
2605 	CU_initialize_registry();
2606 
2607 	suite = CU_add_suite("bdev", NULL, NULL);
2608 	suite_wt = CU_add_suite("bdev_wrong_thread", wrong_thread_setup, wrong_thread_teardown);
2609 
2610 	CU_ADD_TEST(suite, basic);
2611 	CU_ADD_TEST(suite, unregister_and_close);
2612 	CU_ADD_TEST(suite, unregister_and_close_different_threads);
2613 	CU_ADD_TEST(suite, basic_qos);
2614 	CU_ADD_TEST(suite, put_channel_during_reset);
2615 	CU_ADD_TEST(suite, aborted_reset);
2616 	CU_ADD_TEST(suite, aborted_reset_no_outstanding_io);
2617 	CU_ADD_TEST(suite, io_during_reset);
2618 	CU_ADD_TEST(suite, reset_completions);
2619 	CU_ADD_TEST(suite, io_during_qos_queue);
2620 	CU_ADD_TEST(suite, io_during_qos_reset);
2621 	CU_ADD_TEST(suite, enomem);
2622 	CU_ADD_TEST(suite, enomem_multi_bdev);
2623 	CU_ADD_TEST(suite, enomem_multi_bdev_unregister);
2624 	CU_ADD_TEST(suite, enomem_multi_io_target);
2625 	CU_ADD_TEST(suite, qos_dynamic_enable);
2626 	CU_ADD_TEST(suite, bdev_histograms_mt);
2627 	CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
2628 	CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
2629 	CU_ADD_TEST(suite, unregister_during_reset);
2630 	CU_ADD_TEST(suite_wt, spdk_bdev_register_wt);
2631 	CU_ADD_TEST(suite_wt, spdk_bdev_examine_wt);
2632 	CU_ADD_TEST(suite, event_notify_and_close);
2633 
2634 	CU_basic_set_mode(CU_BRM_VERBOSE);
2635 	CU_basic_run_tests();
2636 	num_failures = CU_get_number_of_failures();
2637 	CU_cleanup_registry();
2638 	return num_failures;
2639 }
2640