1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk_internal/cunit.h"
8 
9 #include "common/lib/ut_multithread.c"
10 #include "unit/lib/json_mock.c"
11 
12 #include "spdk/config.h"
13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
14 #undef SPDK_CONFIG_VTUNE
15 
16 #include "bdev/bdev.c"
17 
18 #define BDEV_UT_NUM_THREADS 3
19 
20 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
21 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
22 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk,
23 		int *asc, int *ascq));
24 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
25 	    "test_domain");
26 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
27 	    (struct spdk_memory_domain *domain), 0);
28 DEFINE_STUB_V(spdk_accel_sequence_finish,
29 	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
30 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
31 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
32 DEFINE_STUB(spdk_accel_append_copy, int,
33 	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
34 	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
35 	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
36 	     void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
37 DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);
38 
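/*
 * Memory domain data-movement stubs.  Unless a test injects an error via the return
 * mock, these invoke the completion callback inline and report success.
 */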
39 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
40 int
41 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
42 			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
43 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
44 {
45 	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
46 
47 	cpl_cb(cpl_cb_arg, 0);
48 	return 0;
49 }
50 
51 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
52 int
53 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
54 			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
55 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
56 {
57 	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
58 
59 	cpl_cb(cpl_cb_arg, 0);
60 	return 0;
61 }
62 
63 static int g_accel_io_device;
64 
65 struct spdk_io_channel *
66 spdk_accel_get_io_channel(void)
67 {
68 	return spdk_get_io_channel(&g_accel_io_device);
69 }
70 
71 struct ut_bdev {
72 	struct spdk_bdev	bdev;
73 	void			*io_target;
74 };
75 
76 struct ut_bdev_channel {
77 	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
78 	uint32_t			outstanding_cnt;
79 	uint32_t			avail_cnt;
80 	struct spdk_thread		*thread;
81 	TAILQ_ENTRY(ut_bdev_channel)	link;
82 };
83 
84 int g_io_device;
85 struct ut_bdev g_bdev;
86 struct spdk_bdev_desc *g_desc;
87 bool g_teardown_done = false;
88 bool g_get_io_channel = true;
89 bool g_create_ch = true;
90 bool g_init_complete_called = false;
91 bool g_fini_start_called = true;
92 int g_status = 0;
93 int g_count = 0;
94 struct spdk_histogram_data *g_histogram = NULL;
95 TAILQ_HEAD(, ut_bdev_channel) g_ut_channels;
96 
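/*
 * Accel framework stubs: the tests only need a valid accel I/O channel object,
 * so channel creation and destruction are no-ops.
 */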
97 static int
98 ut_accel_ch_create_cb(void *io_device, void *ctx)
99 {
100 	return 0;
101 }
102 
103 static void
104 ut_accel_ch_destroy_cb(void *io_device, void *ctx)
105 {
106 }
107 
108 static int
109 stub_create_ch(void *io_device, void *ctx_buf)
110 {
111 	struct ut_bdev_channel *ch = ctx_buf;
112 
113 	if (g_create_ch == false) {
114 		return -1;
115 	}
116 
117 	TAILQ_INIT(&ch->outstanding_io);
118 	ch->outstanding_cnt = 0;
119 	/*
120 	 * When avail gets to 0, the submit_request function will complete new I/O with
121 	 *  NOMEM status.  Most tests do not want ENOMEM to occur, so by default set this to a
122 	 *  big value that won't get hit.  The ENOMEM tests can then override this
123 	 *  value to something much smaller to induce ENOMEM conditions.
124 	 */
125 	ch->avail_cnt = 2048;
126 	ch->thread = spdk_get_thread();
127 
128 	TAILQ_INSERT_TAIL(&g_ut_channels, ch, link);
129 
130 	return 0;
131 }
132 
133 static void
134 stub_destroy_ch(void *io_device, void *ctx_buf)
135 {
136 	struct ut_bdev_channel *ch = ctx_buf;
137 
138 	TAILQ_REMOVE(&g_ut_channels, ch, link);
139 }
140 
141 static struct spdk_io_channel *
142 stub_get_io_channel(void *ctx)
143 {
144 	struct ut_bdev *ut_bdev = ctx;
145 
146 	if (g_get_io_channel == true) {
147 		return spdk_get_io_channel(ut_bdev->io_target);
148 	} else {
149 		return NULL;
150 	}
151 }
152 
153 static int
154 stub_destruct(void *ctx)
155 {
156 	return 0;
157 }
158 
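/*
 * Abort every I/O outstanding on one stub channel.  Run on that channel's thread
 * when a RESET is being processed in stub_submit_request().
 */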
159 static void
160 stub_reset_channel(void *ctx)
161 {
162 	struct ut_bdev_channel *ch = ctx;
163 	struct spdk_bdev_io *io;
164 
165 	while (!TAILQ_EMPTY(&ch->outstanding_io)) {
166 		io = TAILQ_FIRST(&ch->outstanding_io);
167 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
168 		ch->outstanding_cnt--;
169 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
170 		ch->avail_cnt++;
171 	}
172 }
173 
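/*
 * Submit callback for the stub bdev module.  A RESET aborts the I/O outstanding on
 * every channel and is then queued like any other I/O; an ABORT looks for its target
 * I/O on this channel and completes inline; everything else is queued until
 * stub_complete_io() completes it, or fails with NOMEM once avail_cnt is exhausted.
 */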
174 static void
175 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
176 {
177 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch), *tmp_ch;
178 	struct spdk_bdev_io *io;
179 
180 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
181 		TAILQ_FOREACH(tmp_ch, &g_ut_channels, link) {
182 			if (spdk_get_thread() == tmp_ch->thread) {
183 				stub_reset_channel(tmp_ch);
184 			} else {
185 				spdk_thread_send_msg(tmp_ch->thread, stub_reset_channel, tmp_ch);
186 			}
187 		}
188 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
189 		TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
190 			if (io == bdev_io->u.abort.bio_to_abort) {
191 				TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
192 				ch->outstanding_cnt--;
193 				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
194 				ch->avail_cnt++;
195 
196 				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
197 				return;
198 			}
199 		}
200 
201 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
202 		return;
203 	}
204 
205 	if (ch->avail_cnt > 0) {
206 		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
207 		ch->outstanding_cnt++;
208 		ch->avail_cnt--;
209 	} else {
210 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
211 	}
212 }
213 
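/*
 * Complete up to num_to_complete I/Os queued on the current thread's channel for
 * io_target with SUCCESS status; passing 0 completes everything.  Returns the
 * number of I/Os completed.
 */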
214 static uint32_t
215 stub_complete_io(void *io_target, uint32_t num_to_complete)
216 {
217 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
218 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
219 	struct spdk_bdev_io *io;
220 	bool complete_all = (num_to_complete == 0);
221 	uint32_t num_completed = 0;
222 
223 	while (complete_all || num_completed < num_to_complete) {
224 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
225 			break;
226 		}
227 		io = TAILQ_FIRST(&ch->outstanding_io);
228 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
229 		ch->outstanding_cnt--;
230 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
231 		ch->avail_cnt++;
232 		num_completed++;
233 	}
234 	spdk_put_io_channel(_ch);
235 	return num_completed;
236 }
237 
238 static bool
239 stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
240 {
241 	return true;
242 }
243 
244 static struct spdk_bdev_fn_table fn_table = {
245 	.get_io_channel =	stub_get_io_channel,
246 	.destruct =		stub_destruct,
247 	.submit_request =	stub_submit_request,
248 	.io_type_supported =	stub_io_type_supported,
249 };
250 
251 struct spdk_bdev_module bdev_ut_if;
252 
253 static int
254 module_init(void)
255 {
256 	spdk_bdev_module_init_done(&bdev_ut_if);
257 	return 0;
258 }
259 
260 static void
261 module_fini(void)
262 {
263 }
264 
265 static void
266 init_complete(void)
267 {
268 	g_init_complete_called = true;
269 }
270 
271 static void
272 fini_start(void)
273 {
274 	g_fini_start_called = true;
275 }
276 
277 struct spdk_bdev_module bdev_ut_if = {
278 	.name = "bdev_ut",
279 	.module_init = module_init,
280 	.module_fini = module_fini,
281 	.async_init = true,
282 	.init_complete = init_complete,
283 	.fini_start = fini_start,
284 };
285 
286 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
287 
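/* Register a test bdev (1024 blocks of 4096 bytes) backed by the stub module and the given io_target. */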
288 static void
289 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
290 {
291 	memset(ut_bdev, 0, sizeof(*ut_bdev));
292 
293 	ut_bdev->io_target = io_target;
294 	ut_bdev->bdev.ctxt = ut_bdev;
295 	ut_bdev->bdev.name = name;
296 	ut_bdev->bdev.fn_table = &fn_table;
297 	ut_bdev->bdev.module = &bdev_ut_if;
298 	ut_bdev->bdev.blocklen = 4096;
299 	ut_bdev->bdev.blockcnt = 1024;
300 
301 	spdk_bdev_register(&ut_bdev->bdev);
302 }
303 
304 static void
305 unregister_bdev(struct ut_bdev *ut_bdev)
306 {
307 	/* Handle any deferred messages. */
308 	poll_threads();
309 	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
310 	/* Handle the async bdev unregister. */
311 	poll_threads();
312 }
313 
314 static void
315 bdev_init_cb(void *done, int rc)
316 {
317 	CU_ASSERT(rc == 0);
318 	*(bool *)done = true;
319 }
320 
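/*
 * Descriptor event callback: REMOVE sets the caller's bool flag, RESIZE bumps the
 * caller's int counter (when an event context is provided), and any other event
 * fails the test.
 */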
321 static void
322 _bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
323 	       void *event_ctx)
324 {
325 	switch (type) {
326 	case SPDK_BDEV_EVENT_REMOVE:
327 		if (event_ctx != NULL) {
328 			*(bool *)event_ctx = true;
329 		}
330 		break;
331 	case SPDK_BDEV_EVENT_RESIZE:
332 		if (event_ctx != NULL) {
333 			*(int *)event_ctx += 1;
334 		}
335 		break;
336 	default:
337 		CU_ASSERT(false);
338 		break;
339 	}
340 }
341 
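/*
 * Common test setup: create the UT threads, bring up the iobuf and bdev layers,
 * register the stub and accel io_devices, then register "ut_bdev" and open it
 * into g_desc from thread 0.
 */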
342 static void
343 setup_test(void)
344 {
345 	bool done = false;
346 	int rc;
347 
348 	TAILQ_INIT(&g_ut_channels);
349 
350 	allocate_cores(BDEV_UT_NUM_THREADS);
351 	allocate_threads(BDEV_UT_NUM_THREADS);
352 	set_thread(0);
353 
354 	rc = spdk_iobuf_initialize();
355 	CU_ASSERT(rc == 0);
356 	spdk_bdev_initialize(bdev_init_cb, &done);
357 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
358 				sizeof(struct ut_bdev_channel), NULL);
359 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
360 				ut_accel_ch_destroy_cb, 0, NULL);
361 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
362 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
363 }
364 
365 static void
366 finish_cb(void *cb_arg)
367 {
368 	g_teardown_done = true;
369 }
370 
371 static void
372 teardown_test(void)
373 {
374 	set_thread(0);
375 	g_teardown_done = false;
376 	spdk_bdev_close(g_desc);
377 	g_desc = NULL;
378 	unregister_bdev(&g_bdev);
379 	spdk_io_device_unregister(&g_io_device, NULL);
380 	spdk_bdev_finish(finish_cb, NULL);
381 	spdk_io_device_unregister(&g_accel_io_device, NULL);
382 	spdk_iobuf_finish(finish_cb, NULL);
383 	poll_threads();
384 	memset(&g_bdev, 0, sizeof(g_bdev));
385 	CU_ASSERT(g_teardown_done == true);
386 	g_teardown_done = false;
387 	free_threads();
388 	free_cores();
389 	CU_ASSERT(TAILQ_EMPTY(&g_ut_channels));
390 }
391 
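/* Count the bdev I/Os currently linked on a bdev_io tailq (e.g. a shared resource's nomem_io list). */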
392 static uint32_t
393 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
394 {
395 	struct spdk_bdev_io *io;
396 	uint32_t cnt = 0;
397 
398 	TAILQ_FOREACH(io, tailq, internal.link) {
399 		cnt++;
400 	}
401 
402 	return cnt;
403 }
404 
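/*
 * Verify the module's init_complete/fini_start callbacks fire, and that channel
 * creation failures (get_io_channel returning NULL or the channel create_cb failing)
 * are reported as a NULL bdev I/O channel.
 */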
405 static void
406 basic(void)
407 {
408 	g_init_complete_called = false;
409 	setup_test();
410 	CU_ASSERT(g_init_complete_called == true);
411 
412 	set_thread(0);
413 
414 	g_get_io_channel = false;
415 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
416 	CU_ASSERT(g_ut_threads[0].ch == NULL);
417 
418 	g_get_io_channel = true;
419 	g_create_ch = false;
420 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
421 	CU_ASSERT(g_ut_threads[0].ch == NULL);
422 
423 	g_get_io_channel = true;
424 	g_create_ch = true;
425 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
426 	CU_ASSERT(g_ut_threads[0].ch != NULL);
427 	spdk_put_io_channel(g_ut_threads[0].ch);
428 
429 	g_fini_start_called = false;
430 	teardown_test();
431 	CU_ASSERT(g_fini_start_called == true);
432 }
433 
434 static void
435 _bdev_unregistered(void *done, int rc)
436 {
437 	CU_ASSERT(rc == 0);
438 	*(bool *)done = true;
439 }
440 
441 static void
442 unregister_and_close(void)
443 {
444 	bool done, remove_notify;
445 	struct spdk_bdev_desc *desc = NULL;
446 
447 	setup_test();
448 	set_thread(0);
449 
450 	/* setup_test() automatically opens the bdev,
451 	 * but this test needs to do that in a different
452 	 * way. */
453 	spdk_bdev_close(g_desc);
454 	poll_threads();
455 
456 	/* Try hotremoving a bdev with descriptors which don't provide
457 	 * any context to the notification callback */
458 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
459 	SPDK_CU_ASSERT_FATAL(desc != NULL);
460 
461 	/* There is an open descriptor on the device. Unregister it,
462 	 * which can't proceed until the descriptor is closed. */
463 	done = false;
464 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
465 
466 	/* Poll the threads to allow all events to be processed */
467 	poll_threads();
468 
469 	/* Make sure the bdev was not unregistered. We still have a
470 	 * descriptor open */
471 	CU_ASSERT(done == false);
472 
473 	spdk_bdev_close(desc);
474 	poll_threads();
475 	desc = NULL;
476 
477 	/* The unregister should have completed */
478 	CU_ASSERT(done == true);
479 
480 
481 	/* Register the bdev again */
482 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
483 
484 	remove_notify = false;
485 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
486 	SPDK_CU_ASSERT_FATAL(desc != NULL);
487 	CU_ASSERT(remove_notify == false);
488 
489 	/* There is an open descriptor on the device. Unregister it,
490 	 * which can't proceed until the descriptor is closed. */
491 	done = false;
492 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
493 	/* No polling has occurred, so neither of these should execute */
494 	CU_ASSERT(remove_notify == false);
495 	CU_ASSERT(done == false);
496 
497 	/* Prior to the unregister completing, close the descriptor */
498 	spdk_bdev_close(desc);
499 
500 	/* Poll the threads to allow all events to be processed */
501 	poll_threads();
502 
503 	/* Remove notify should not have been called because the
504 	 * descriptor is already closed. */
505 	CU_ASSERT(remove_notify == false);
506 
507 	/* The unregister should have completed */
508 	CU_ASSERT(done == true);
509 
510 	/* Restore the original g_bdev so that we can use teardown_test(). */
511 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
512 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
513 	teardown_test();
514 }
515 
516 static void
517 unregister_and_close_different_threads(void)
518 {
519 	bool done;
520 	struct spdk_bdev_desc *desc = NULL;
521 
522 	setup_test();
523 	set_thread(0);
524 
525 	/* setup_test() automatically opens the bdev,
526 	 * but this test needs to do that in a different
527 	 * way. */
528 	spdk_bdev_close(g_desc);
529 	poll_threads();
530 
531 	set_thread(1);
532 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
533 	SPDK_CU_ASSERT_FATAL(desc != NULL);
534 	done = false;
535 
536 	set_thread(0);
537 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
538 
539 	/* Poll the threads to allow all events to be processed */
540 	poll_threads();
541 
542 	/* Make sure the bdev was not unregistered. We still have a
543 	 * descriptor open */
544 	CU_ASSERT(done == false);
545 
546 	/* Close the descriptor on thread 1.  Poll the thread and confirm the
547 	 * unregister did not complete, since it was unregistered on thread 0.
548 	 */
549 	set_thread(1);
550 	spdk_bdev_close(desc);
551 	poll_thread(1);
552 	CU_ASSERT(done == false);
553 
554 	/* Now poll thread 0 and confirm the unregister completed. */
555 	set_thread(0);
556 	poll_thread(0);
557 	CU_ASSERT(done == true);
558 
559 	/* Restore the original g_bdev so that we can use teardown_test(). */
560 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
561 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
562 	teardown_test();
563 }
564 
565 static void
566 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
567 {
568 	bool *done = cb_arg;
569 
570 	CU_ASSERT(success == true);
571 	*done = true;
572 	spdk_bdev_free_io(bdev_io);
573 }
574 
575 static void
576 put_channel_during_reset(void)
577 {
578 	struct spdk_io_channel *io_ch;
579 	bool done = false;
580 
581 	setup_test();
582 
583 	set_thread(0);
584 	io_ch = spdk_bdev_get_io_channel(g_desc);
585 	CU_ASSERT(io_ch != NULL);
586 
587 	/*
588 	 * Start a reset, but then put the I/O channel before
589 	 *  the deferred messages for the reset get a chance to
590 	 *  execute.
591 	 */
592 	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
593 	spdk_put_io_channel(io_ch);
594 	poll_threads();
595 	stub_complete_io(g_bdev.io_target, 0);
596 
597 	teardown_test();
598 }
599 
600 static void
601 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
602 {
603 	enum spdk_bdev_io_status *status = cb_arg;
604 
605 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
606 	spdk_bdev_free_io(bdev_io);
607 }
608 
609 static void io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
610 
611 static void
612 aborted_reset(void)
613 {
614 	struct spdk_io_channel *io_ch[2];
615 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
616 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
617 
618 	setup_test();
619 
620 	set_thread(0);
621 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
622 	CU_ASSERT(io_ch[0] != NULL);
623 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
624 	poll_threads();
625 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
626 
627 	/*
628 	 * First reset has been submitted on ch0.  Now submit a second
629 	 *  reset on ch1 which will get queued since there is already a
630 	 *  reset in progress.
631 	 */
632 	set_thread(1);
633 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
634 	CU_ASSERT(io_ch[1] != NULL);
635 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
636 	poll_threads();
637 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
638 
639 	/*
640 	 * Now destroy ch1.  This will abort the queued reset.  Check that
641 	 *  the second reset was completed with failed status.  Also check
642 	 *  that bdev->internal.reset_in_progress != NULL, since the
643 	 *  original reset has not been completed yet.  This ensures that
644 	 *  the bdev code is correctly noticing that the failed reset is
645 	 *  *not* the one that had been submitted to the bdev module.
646 	 */
647 	set_thread(1);
648 	spdk_put_io_channel(io_ch[1]);
649 	poll_threads();
650 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
651 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
652 
653 	/*
654 	 * Now complete the first reset, verify that it completed with SUCCESS
655 	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
656 	 */
657 	set_thread(0);
658 	spdk_put_io_channel(io_ch[0]);
659 	stub_complete_io(g_bdev.io_target, 0);
660 	poll_threads();
661 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
662 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
663 
664 	teardown_test();
665 }
666 
667 static void
668 aborted_reset_no_outstanding_io(void)
669 {
670 	struct spdk_io_channel *io_ch[2];
671 	struct spdk_bdev_channel *bdev_ch[2];
672 	struct spdk_bdev *bdev[2];
673 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
674 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
675 
676 	setup_test();
677 
678 	/*
679 	 * This time we test the reset without any outstanding IO
680 	 * present on the bdev channel, so both resets should finish
681 	 * immediately.
682 	 */
683 
684 	set_thread(0);
685 	/* Set a nonzero reset_io_drain_timeout so the reset takes the drain path;
686 	 * with no outstanding IO it should complete without reaching the module. */
687 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
688 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
689 	bdev[0] = bdev_ch[0]->bdev;
690 	bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
691 	CU_ASSERT(io_ch[0] != NULL);
692 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
693 	poll_threads();
694 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
695 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
696 	spdk_put_io_channel(io_ch[0]);
697 
698 	set_thread(1);
699 	/* Set a nonzero reset_io_drain_timeout so the reset takes the drain path;
700 	 * with no outstanding IO it should complete without reaching the module. */
701 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
702 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
703 	bdev[1] = bdev_ch[1]->bdev;
704 	bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
705 	CU_ASSERT(io_ch[1] != NULL);
706 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
707 	poll_threads();
708 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
709 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
710 	spdk_put_io_channel(io_ch[1]);
711 
712 	stub_complete_io(g_bdev.io_target, 0);
713 	poll_threads();
714 
715 	teardown_test();
716 }
717 
718 
719 static void
720 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
721 {
722 	enum spdk_bdev_io_status *status = cb_arg;
723 
724 	*status = bdev_io->internal.status;
725 	spdk_bdev_free_io(bdev_io);
726 }
727 
728 static void
729 io_during_reset(void)
730 {
731 	struct spdk_io_channel *io_ch[2];
732 	struct spdk_bdev_channel *bdev_ch[2];
733 	enum spdk_bdev_io_status status0, status1, status_reset;
734 	int rc;
735 
736 	setup_test();
737 
738 	/*
739 	 * First test normal case - submit an I/O on each of two channels (with no resets)
740 	 *  and verify they complete successfully.
741 	 */
742 	set_thread(0);
743 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
744 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
745 	CU_ASSERT(bdev_ch[0]->flags == 0);
746 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
747 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
748 	CU_ASSERT(rc == 0);
749 
750 	set_thread(1);
751 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
752 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
753 	CU_ASSERT(bdev_ch[1]->flags == 0);
754 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
755 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
756 	CU_ASSERT(rc == 0);
757 
758 	poll_threads();
759 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
760 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
761 
762 	set_thread(0);
763 	stub_complete_io(g_bdev.io_target, 0);
764 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
765 
766 	set_thread(1);
767 	stub_complete_io(g_bdev.io_target, 0);
768 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
769 
770 	/*
771 	 * Now submit a reset, and leave it pending while we submit I/O on two different
772 	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
773 	 *  progress.
774 	 */
775 	set_thread(0);
776 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
777 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
778 	CU_ASSERT(rc == 0);
779 
780 	CU_ASSERT(bdev_ch[0]->flags == 0);
781 	CU_ASSERT(bdev_ch[1]->flags == 0);
782 	poll_threads();
783 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
784 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
785 
786 	set_thread(0);
787 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
788 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
789 	CU_ASSERT(rc == 0);
790 
791 	set_thread(1);
792 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
793 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
794 	CU_ASSERT(rc == 0);
795 
796 	/*
797 	 * A reset is in progress so these read I/O should complete with ABORTED status.  Note that we
798 	 *  need to poll_threads() since I/O completed inline have their completion deferred.
799 	 */
800 	poll_threads();
801 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
802 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
803 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
804 
805 	/*
806 	 * Complete the reset
807 	 */
808 	set_thread(0);
809 	stub_complete_io(g_bdev.io_target, 0);
810 
811 	/*
812 	 * Only poll thread 0. We should not get a completion.
813 	 */
814 	poll_thread(0);
815 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
816 
817 	/*
818 	 * Poll both thread 0 and 1 so the messages can propagate and we
819 	 * get a completion.
820 	 */
821 	poll_threads();
822 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
823 
824 	spdk_put_io_channel(io_ch[0]);
825 	set_thread(1);
826 	spdk_put_io_channel(io_ch[1]);
827 	poll_threads();
828 
829 	teardown_test();
830 }
831 
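/* Count how many RESET I/Os are currently queued in the stub module for the given io_target. */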
832 static uint32_t
833 count_queued_resets(void *io_target)
834 {
835 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
836 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
837 	struct spdk_bdev_io *io;
838 	uint32_t submitted_resets = 0;
839 
840 	TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
841 		if (io->type == SPDK_BDEV_IO_TYPE_RESET) {
842 			submitted_resets++;
843 		}
844 	}
845 
846 	spdk_put_io_channel(_ch);
847 
848 	return submitted_resets;
849 }
850 
851 static void
852 reset_completions(void)
853 {
854 	struct spdk_io_channel *io_ch;
855 	struct spdk_bdev_channel *bdev_ch;
856 	struct spdk_bdev *bdev;
857 	enum spdk_bdev_io_status status0, status_reset;
858 	int rc, iter;
859 
860 	setup_test();
861 
862 	/* This test covers four test cases:
863 	 * 1) reset_io_drain_timeout of a bdev is greater than 0
864 	 * 2) No outstanding IO are present on any bdev channel
865 	 * 3) Outstanding IO finish during bdev reset
866 	 * 4) Outstanding IO do not finish before reset is done waiting
867 	 *    for them.
868 	 *
869 	 * Above conditions mainly affect the timing of bdev reset completion
870 	 * and whether a reset should be skipped via spdk_bdev_io_complete()
871 	 * or sent down to the underlying bdev module via bdev_io_submit_reset(). */
872 
873 	/* Test preparation */
874 	set_thread(0);
875 	io_ch = spdk_bdev_get_io_channel(g_desc);
876 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
877 	CU_ASSERT(bdev_ch->flags == 0);
878 
879 
880 	/* Test case 1) reset_io_drain_timeout set to 0. Reset should be sent down immediately. */
881 	bdev = &g_bdev.bdev;
882 	bdev->reset_io_drain_timeout = 0;
883 
884 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
885 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
886 	CU_ASSERT(rc == 0);
887 	poll_threads();
888 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
889 
890 	/* Call reset completion inside bdev module. */
891 	stub_complete_io(g_bdev.io_target, 0);
892 	poll_threads();
893 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
894 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
895 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
896 
897 
898 	/* Test case 2) no outstanding IO are present. Reset should perform one iteration over
899 	 * channels and then be skipped. */
900 	bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
901 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
902 
903 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
904 	CU_ASSERT(rc == 0);
905 	poll_threads();
906 	/* Reset was never submitted to the bdev module. */
907 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
908 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
909 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
910 
911 
912 	/* Test case 3) outstanding IO finish during bdev reset procedure. Reset should initiate
913 	 * wait poller to check for IO completions every second, until reset_io_drain_timeout is
914 	 * reached, but finish earlier than this threshold. */
915 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
916 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
917 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
918 	CU_ASSERT(rc == 0);
919 
920 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
921 	CU_ASSERT(rc == 0);
922 	poll_threads();
923 	/* The reset just started and should not have been submitted yet. */
924 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
925 
926 	poll_threads();
927 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
928 	/* Let the poller wait for about half the time then complete outstanding IO. */
929 	for (iter = 0; iter < 2; iter++) {
930 		/* Reset is still processing and not submitted at this point. */
931 		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
932 		spdk_delay_us(1000 * 1000);
933 		poll_threads();
934 		poll_threads();
935 	}
936 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
937 	stub_complete_io(g_bdev.io_target, 0);
938 	poll_threads();
939 	spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
940 	poll_threads();
941 	poll_threads();
942 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
943 	/* Sending reset to the bdev module has been skipped. */
944 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
945 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
946 
947 
948 	/* Test case 4) outstanding IO are still present after reset_io_drain_timeout
949 	 * seconds have passed. */
950 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
951 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
952 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
953 	CU_ASSERT(rc == 0);
954 
955 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
956 	CU_ASSERT(rc == 0);
957 	poll_threads();
958 	/* The reset just started and should not have been submitted yet. */
959 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
960 
961 	poll_threads();
962 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
963 	/* Let the poller wait for reset_io_drain_timeout seconds. */
964 	for (iter = 0; iter < bdev->reset_io_drain_timeout; iter++) {
965 		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
966 		spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
967 		poll_threads();
968 		poll_threads();
969 	}
970 
971 	/* After timing out, the reset should have been sent to the module. */
972 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
973 	/* Complete reset submitted to the module and the read IO. */
974 	stub_complete_io(g_bdev.io_target, 0);
975 	poll_threads();
976 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
977 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
978 
979 
980 	/* Destroy the channel and end the test. */
981 	spdk_put_io_channel(io_ch);
982 	poll_threads();
983 
984 	teardown_test();
985 }
986 
987 
988 static void
989 basic_qos(void)
990 {
991 	struct spdk_io_channel *io_ch[2];
992 	struct spdk_bdev_channel *bdev_ch[2];
993 	struct spdk_bdev *bdev;
994 	enum spdk_bdev_io_status status, abort_status;
995 	int rc;
996 
997 	setup_test();
998 
999 	/* Enable QoS */
1000 	bdev = &g_bdev.bdev;
1001 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1002 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1003 	TAILQ_INIT(&bdev->internal.qos->queued);
1004 	/*
1005 	 * Enable read/write IOPS, read only byte per second and
1006 	 * read/write byte per second rate limits.
1007 	 * In this case, all rate limits will take equal effect.
1008 	 */
1009 	/* 2000 read/write I/O per second, or 2 per millisecond */
1010 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
1011 	/* 8K read/write byte per millisecond with 4K block size */
1012 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
1013 	/* 8K read only byte per millisecond with 4K block size */
1014 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
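	/*
	 * With the 4096-byte blocklen set in register_bdev(), 8192000 bytes/sec is
	 * 8192 bytes (two blocks) per millisecond, so all three limits above allow
	 * the same 2 I/O per millisecond.
	 */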
1015 
1016 	g_get_io_channel = true;
1017 
1018 	set_thread(0);
1019 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1020 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1021 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1022 
1023 	set_thread(1);
1024 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1025 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1026 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1027 
1028 	/*
1029 	 * Send an I/O on thread 0, which is where the QoS thread is running.
1030 	 */
1031 	set_thread(0);
1032 	status = SPDK_BDEV_IO_STATUS_PENDING;
1033 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1034 	CU_ASSERT(rc == 0);
1035 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1036 	poll_threads();
1037 	stub_complete_io(g_bdev.io_target, 0);
1038 	poll_threads();
1039 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1040 
1041 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1042 	status = SPDK_BDEV_IO_STATUS_PENDING;
1043 	set_thread(1);
1044 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1045 	CU_ASSERT(rc == 0);
1046 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1047 	poll_threads();
1048 	/* Complete I/O on thread 0. This should not complete the I/O we submitted. */
1049 	set_thread(0);
1050 	stub_complete_io(g_bdev.io_target, 0);
1051 	poll_threads();
1052 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1053 	/* Now complete I/O on original thread 1. */
1054 	set_thread(1);
1055 	poll_threads();
1056 	stub_complete_io(g_bdev.io_target, 0);
1057 	poll_threads();
1058 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1059 
1060 	/* Reset rate limit for the next test cases. */
1061 	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
1062 	poll_threads();
1063 
1064 	/*
1065 	 * Test abort request when QoS is enabled.
1066 	 */
1067 
1068 	/* Send an I/O on thread 0, which is where the QoS thread is running. */
1069 	set_thread(0);
1070 	status = SPDK_BDEV_IO_STATUS_PENDING;
1071 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1072 	CU_ASSERT(rc == 0);
1073 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1074 	/* Send an abort to the I/O on the same thread. */
1075 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1076 	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
1077 	CU_ASSERT(rc == 0);
1078 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1079 	poll_threads();
1080 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1081 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1082 
1083 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1084 	status = SPDK_BDEV_IO_STATUS_PENDING;
1085 	set_thread(1);
1086 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1087 	CU_ASSERT(rc == 0);
1088 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1089 	poll_threads();
1090 	/* Send an abort to the I/O on the same thread. */
1091 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1092 	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
1093 	CU_ASSERT(rc == 0);
1094 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1095 	poll_threads();
1096 	/* On thread 1, the I/O should have completed as aborted and the abort as successful. */
1097 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1098 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1099 
1100 	set_thread(0);
1101 
1102 	/*
1103 	 * Close the descriptor only, which should stop the qos channel since
1104 	 * it is the last descriptor to be removed.
1105 	 */
1106 	spdk_bdev_close(g_desc);
1107 	poll_threads();
1108 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1109 
1110 	/*
1111 	 * Open the bdev again, which should set up the qos channel again
1112 	 * since the I/O channels still exist.
1113 	 */
1114 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1115 	poll_threads();
1116 	CU_ASSERT(bdev->internal.qos->ch != NULL);
1117 
1118 	/* Tear down the channels */
1119 	set_thread(0);
1120 	spdk_put_io_channel(io_ch[0]);
1121 	set_thread(1);
1122 	spdk_put_io_channel(io_ch[1]);
1123 	poll_threads();
1124 	set_thread(0);
1125 
1126 	/* Close the descriptor, which should stop the qos channel */
1127 	spdk_bdev_close(g_desc);
1128 	poll_threads();
1129 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1130 
1131 	/* Open the bdev again, no qos channel setup without valid channels. */
1132 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1133 	poll_threads();
1134 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1135 
1136 	/* Create the channels in reverse order. */
1137 	set_thread(1);
1138 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1139 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1140 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1141 
1142 	set_thread(0);
1143 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1144 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1145 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1146 
1147 	/* Confirm that the qos thread is now thread 1 */
1148 	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
1149 
1150 	/* Tear down the channels */
1151 	set_thread(0);
1152 	spdk_put_io_channel(io_ch[0]);
1153 	set_thread(1);
1154 	spdk_put_io_channel(io_ch[1]);
1155 	poll_threads();
1156 
1157 	set_thread(0);
1158 
1159 	teardown_test();
1160 }
1161 
1162 static void
1163 io_during_qos_queue(void)
1164 {
1165 	struct spdk_io_channel *io_ch[2];
1166 	struct spdk_bdev_channel *bdev_ch[2];
1167 	struct spdk_bdev *bdev;
1168 	enum spdk_bdev_io_status status0, status1, status2;
1169 	int rc;
1170 
1171 	setup_test();
1172 	MOCK_SET(spdk_get_ticks, 0);
1173 
1174 	/* Enable QoS */
1175 	bdev = &g_bdev.bdev;
1176 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1177 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1178 	TAILQ_INIT(&bdev->internal.qos->queued);
1179 	/*
1180 	 * Enable read/write IOPS, read only byte per sec, write only
1181 	 * byte per sec and read/write byte per sec rate limits.
1182 	 * In this case, both read only and write only byte per sec
1183 	 * rate limit will take effect.
1184 	 */
1185 	/* 4000 read/write I/O per second, or 4 per millisecond */
1186 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
1187 	/* 8K byte per millisecond with 4K block size */
1188 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
1189 	/* 4K byte per millisecond with 4K block size */
1190 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
1191 	/* 4K byte per millisecond with 4K block size */
1192 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
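	/*
	 * With 4096-byte blocks: 4000 IOPS is 4 I/O per millisecond, 8192000 bytes/sec is
	 * two blocks per millisecond, and 4096000 bytes/sec is one block per millisecond,
	 * so the read-only and write-only byte limits each allow one I/O per millisecond.
	 */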
1193 
1194 	g_get_io_channel = true;
1195 
1196 	/* Create channels */
1197 	set_thread(0);
1198 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1199 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1200 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1201 
1202 	set_thread(1);
1203 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1204 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1205 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1206 
1207 	/* Send two read I/Os */
1208 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1209 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1210 	CU_ASSERT(rc == 0);
1211 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1212 	set_thread(0);
1213 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1214 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1215 	CU_ASSERT(rc == 0);
1216 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1217 	/* Send one write I/O */
1218 	status2 = SPDK_BDEV_IO_STATUS_PENDING;
1219 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
1220 	CU_ASSERT(rc == 0);
1221 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
1222 
1223 	/* Complete any I/O that arrived at the disk */
1224 	poll_threads();
1225 	set_thread(1);
1226 	stub_complete_io(g_bdev.io_target, 0);
1227 	set_thread(0);
1228 	stub_complete_io(g_bdev.io_target, 0);
1229 	poll_threads();
1230 
1231 	/* Only one of the two read I/Os should complete. (logical XOR) */
1232 	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
1233 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1234 	} else {
1235 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1236 	}
1237 	/* The write I/O should complete. */
1238 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
1239 
1240 	/* Advance in time by a millisecond */
1241 	spdk_delay_us(1000);
1242 
1243 	/* Complete more I/O */
1244 	poll_threads();
1245 	set_thread(1);
1246 	stub_complete_io(g_bdev.io_target, 0);
1247 	set_thread(0);
1248 	stub_complete_io(g_bdev.io_target, 0);
1249 	poll_threads();
1250 
1251 	/* Now the second read I/O should be done */
1252 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
1253 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1254 
1255 	/* Tear down the channels */
1256 	set_thread(1);
1257 	spdk_put_io_channel(io_ch[1]);
1258 	set_thread(0);
1259 	spdk_put_io_channel(io_ch[0]);
1260 	poll_threads();
1261 
1262 	teardown_test();
1263 }
1264 
1265 static void
1266 io_during_qos_reset(void)
1267 {
1268 	struct spdk_io_channel *io_ch[2];
1269 	struct spdk_bdev_channel *bdev_ch[2];
1270 	struct spdk_bdev *bdev;
1271 	enum spdk_bdev_io_status status0, status1, reset_status;
1272 	int rc;
1273 
1274 	setup_test();
1275 	MOCK_SET(spdk_get_ticks, 0);
1276 
1277 	/* Enable QoS */
1278 	bdev = &g_bdev.bdev;
1279 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1280 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1281 	TAILQ_INIT(&bdev->internal.qos->queued);
1282 	/*
1283 	 * Enable read/write IOPS, write only byte per sec and
1284 	 * read/write byte per second rate limits.
1285 	 * In this case, read/write byte per second rate limit will
1286 	 * take effect first.
1287 	 */
1288 	/* 2000 read/write I/O per second, or 2 per millisecond */
1289 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
1290 	/* 4K byte per millisecond with 4K block size */
1291 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
1292 	/* 8K byte per millisecond with 4K block size */
1293 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
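	/*
	 * With 4096-byte blocks, the 4096000 bytes/sec read/write byte limit allows only
	 * one block per millisecond, which is stricter than the 2 IOPS and two-block
	 * write byte limits, so it is the limit that gates these writes.
	 */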
1294 
1295 	g_get_io_channel = true;
1296 
1297 	/* Create channels */
1298 	set_thread(0);
1299 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1300 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1301 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1302 
1303 	set_thread(1);
1304 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1305 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1306 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1307 
1308 	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
1309 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1310 	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1311 	CU_ASSERT(rc == 0);
1312 	set_thread(0);
1313 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1314 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1315 	CU_ASSERT(rc == 0);
1316 
1317 	poll_threads();
1318 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1319 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1320 
1321 	/* Reset the bdev. */
1322 	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
1323 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
1324 	CU_ASSERT(rc == 0);
1325 
1326 	/* Complete any I/O that arrived at the disk */
1327 	poll_threads();
1328 	set_thread(1);
1329 	stub_complete_io(g_bdev.io_target, 0);
1330 	set_thread(0);
1331 	stub_complete_io(g_bdev.io_target, 0);
1332 	poll_threads();
1333 
1334 	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1335 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
1336 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
1337 
1338 	/* Tear down the channels */
1339 	set_thread(1);
1340 	spdk_put_io_channel(io_ch[1]);
1341 	set_thread(0);
1342 	spdk_put_io_channel(io_ch[0]);
1343 	poll_threads();
1344 
1345 	teardown_test();
1346 }
1347 
1348 static void
1349 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1350 {
1351 	enum spdk_bdev_io_status *status = cb_arg;
1352 
1353 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
1354 	spdk_bdev_free_io(bdev_io);
1355 }
1356 
1357 static void
1358 enomem(void)
1359 {
1360 	struct spdk_io_channel *io_ch;
1361 	struct spdk_bdev_channel *bdev_ch;
1362 	struct spdk_bdev_shared_resource *shared_resource;
1363 	struct ut_bdev_channel *ut_ch;
1364 	const uint32_t IO_ARRAY_SIZE = 64;
1365 	const uint32_t AVAIL = 20;
1366 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
1367 	uint32_t nomem_cnt, i;
1368 	struct spdk_bdev_io *first_io;
1369 	int rc;
1370 
1371 	setup_test();
1372 
1373 	set_thread(0);
1374 	io_ch = spdk_bdev_get_io_channel(g_desc);
1375 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1376 	shared_resource = bdev_ch->shared_resource;
1377 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1378 	ut_ch->avail_cnt = AVAIL;
1379 
1380 	/* First submit a number of IOs equal to what the channel can support. */
1381 	for (i = 0; i < AVAIL; i++) {
1382 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1383 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1384 		CU_ASSERT(rc == 0);
1385 	}
1386 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1387 
1388 	/*
1389 	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
1390 	 *  the nomem_io list.
1391 	 */
1392 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1393 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1394 	CU_ASSERT(rc == 0);
1395 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1396 	first_io = TAILQ_FIRST(&shared_resource->nomem_io);
1397 
1398 	/*
1399 	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
1400 	 *  the first_io above.
1401 	 */
1402 	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
1403 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1404 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1405 		CU_ASSERT(rc == 0);
1406 	}
1407 
1408 	/* Assert that first_io is still at the head of the list. */
1409 	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
1410 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
1411 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1412 	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
1413 
1414 	/*
1415 	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
1416 	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
1417 	 *  list.
1418 	 */
1419 	stub_complete_io(g_bdev.io_target, 1);
1420 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1421 
1422 	/*
1423 	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
1424 	 *  and we should see I/O get resubmitted to the test bdev module.
1425 	 */
1426 	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
1427 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
1428 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1429 
1430 	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
1431 	stub_complete_io(g_bdev.io_target, 1);
1432 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1433 
1434 	/*
1435 	 * Send a reset and confirm that all I/O are completed, including the ones that
1436 	 *  were queued on the nomem_io list.
1437 	 */
1438 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
1439 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
1440 	poll_threads();
1441 	CU_ASSERT(rc == 0);
1442 	/* This will complete the reset. */
1443 	stub_complete_io(g_bdev.io_target, 0);
1444 
1445 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
1446 	CU_ASSERT(shared_resource->io_outstanding == 0);
1447 
1448 	spdk_put_io_channel(io_ch);
1449 	poll_threads();
1450 	teardown_test();
1451 }
1452 
1453 static void
1454 enomem_multi_bdev(void)
1455 {
1456 	struct spdk_io_channel *io_ch;
1457 	struct spdk_bdev_channel *bdev_ch;
1458 	struct spdk_bdev_shared_resource *shared_resource;
1459 	struct ut_bdev_channel *ut_ch;
1460 	const uint32_t IO_ARRAY_SIZE = 64;
1461 	const uint32_t AVAIL = 20;
1462 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1463 	uint32_t i;
1464 	struct ut_bdev *second_bdev;
1465 	struct spdk_bdev_desc *second_desc = NULL;
1466 	struct spdk_bdev_channel *second_bdev_ch;
1467 	struct spdk_io_channel *second_ch;
1468 	int rc;
1469 
1470 	setup_test();
1471 
1472 	/* Register second bdev with the same io_target  */
1473 	second_bdev = calloc(1, sizeof(*second_bdev));
1474 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1475 	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
1476 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1477 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1478 
1479 	set_thread(0);
1480 	io_ch = spdk_bdev_get_io_channel(g_desc);
1481 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1482 	shared_resource = bdev_ch->shared_resource;
1483 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1484 	ut_ch->avail_cnt = AVAIL;
1485 
1486 	second_ch = spdk_bdev_get_io_channel(second_desc);
1487 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1488 	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
1489 
1490 	/* Saturate io_target through bdev A. */
1491 	for (i = 0; i < AVAIL; i++) {
1492 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1493 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1494 		CU_ASSERT(rc == 0);
1495 	}
1496 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1497 
1498 	/*
1499 	 * Now submit I/O through the second bdev. This should fail with ENOMEM
1500 	 * and then go onto the nomem_io list.
1501 	 */
1502 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1503 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1504 	CU_ASSERT(rc == 0);
1505 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1506 
1507 	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
1508 	stub_complete_io(g_bdev.io_target, AVAIL);
1509 
1510 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1511 	CU_ASSERT(shared_resource->io_outstanding == 1);
1512 
1513 	/* Now complete our retried I/O  */
1514 	stub_complete_io(g_bdev.io_target, 1);
1515 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1516 
1517 	spdk_put_io_channel(io_ch);
1518 	spdk_put_io_channel(second_ch);
1519 	spdk_bdev_close(second_desc);
1520 	unregister_bdev(second_bdev);
1521 	poll_threads();
1522 	free(second_bdev);
1523 	teardown_test();
1524 }
1525 
1526 static void
1527 enomem_multi_bdev_unregister(void)
1528 {
1529 	struct spdk_io_channel *io_ch;
1530 	struct spdk_bdev_channel *bdev_ch;
1531 	struct spdk_bdev_shared_resource *shared_resource;
1532 	struct ut_bdev_channel *ut_ch;
1533 	const uint32_t IO_ARRAY_SIZE = 64;
1534 	const uint32_t AVAIL = 20;
1535 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1536 	uint32_t i;
1537 	int rc;
1538 
1539 	setup_test();
1540 
1541 	set_thread(0);
1542 	io_ch = spdk_bdev_get_io_channel(g_desc);
1543 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1544 	shared_resource = bdev_ch->shared_resource;
1545 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1546 	ut_ch->avail_cnt = AVAIL;
1547 
1548 	/* Saturate io_target through the bdev. */
1549 	for (i = 0; i < AVAIL; i++) {
1550 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1551 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1552 		CU_ASSERT(rc == 0);
1553 	}
1554 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1555 
1556 	/*
1557 	 * Now submit I/O through the bdev. This should fail with ENOMEM
1558 	 * and then go onto the nomem_io list.
1559 	 */
1560 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1561 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1562 	CU_ASSERT(rc == 0);
1563 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1564 
1565 	/* Unregister the bdev to abort the IOs from nomem_io queue. */
1566 	unregister_bdev(&g_bdev);
1567 	CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED);
1568 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1569 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL);
1570 
1571 	/* Complete the bdev's I/O. */
1572 	stub_complete_io(g_bdev.io_target, AVAIL);
1573 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1574 
1575 	spdk_put_io_channel(io_ch);
1576 	poll_threads();
1577 	teardown_test();
1578 }
1579 
1580 static void
1581 enomem_multi_io_target(void)
1582 {
1583 	struct spdk_io_channel *io_ch;
1584 	struct spdk_bdev_channel *bdev_ch;
1585 	struct ut_bdev_channel *ut_ch;
1586 	const uint32_t IO_ARRAY_SIZE = 64;
1587 	const uint32_t AVAIL = 20;
1588 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1589 	uint32_t i;
1590 	int new_io_device;
1591 	struct ut_bdev *second_bdev;
1592 	struct spdk_bdev_desc *second_desc = NULL;
1593 	struct spdk_bdev_channel *second_bdev_ch;
1594 	struct spdk_io_channel *second_ch;
1595 	int rc;
1596 
1597 	setup_test();
1598 
1599 	/* Create new io_target and a second bdev using it */
1600 	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1601 				sizeof(struct ut_bdev_channel), NULL);
1602 	second_bdev = calloc(1, sizeof(*second_bdev));
1603 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1604 	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
1605 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1606 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1607 
1608 	set_thread(0);
1609 	io_ch = spdk_bdev_get_io_channel(g_desc);
1610 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1611 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1612 	ut_ch->avail_cnt = AVAIL;
1613 
1614 	/* Different io_target should imply a different shared_resource */
1615 	second_ch = spdk_bdev_get_io_channel(second_desc);
1616 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1617 	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1618 
1619 	/* Saturate io_target through bdev A. */
1620 	for (i = 0; i < AVAIL; i++) {
1621 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1622 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1623 		CU_ASSERT(rc == 0);
1624 	}
1625 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1626 
1627 	/* Issue one more I/O to fill ENOMEM list. */
1628 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1629 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1630 	CU_ASSERT(rc == 0);
1631 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1632 
1633 	/*
1634 	 * Now submit I/O through the second bdev. This should go through and complete
1635 	 * successfully because we're using a different io_device underneath.
1636 	 */
1637 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1638 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1639 	CU_ASSERT(rc == 0);
1640 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1641 	stub_complete_io(second_bdev->io_target, 1);
1642 
1643 	/* Cleanup; Complete outstanding I/O. */
1644 	stub_complete_io(g_bdev.io_target, AVAIL);
1645 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1646 	/* Complete the ENOMEM I/O */
1647 	stub_complete_io(g_bdev.io_target, 1);
1648 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1649 
1650 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1651 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1652 	spdk_put_io_channel(io_ch);
1653 	spdk_put_io_channel(second_ch);
1654 	spdk_bdev_close(second_desc);
1655 	unregister_bdev(second_bdev);
1656 	spdk_io_device_unregister(&new_io_device, NULL);
1657 	poll_threads();
1658 	free(second_bdev);
1659 	teardown_test();
1660 }
1661 
1662 static void
1663 qos_dynamic_enable_done(void *cb_arg, int status)
1664 {
1665 	int *rc = cb_arg;
1666 	*rc = status;
1667 }
1668 
1669 static void
1670 qos_dynamic_enable(void)
1671 {
1672 	struct spdk_io_channel *io_ch[2];
1673 	struct spdk_bdev_channel *bdev_ch[2];
1674 	struct spdk_bdev *bdev;
1675 	enum spdk_bdev_io_status bdev_io_status[2];
1676 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
1677 	int status, second_status, rc, i;
1678 
1679 	setup_test();
1680 	MOCK_SET(spdk_get_ticks, 0);
1681 
1682 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1683 		limits[i] = UINT64_MAX;
1684 	}
1685 
1686 	bdev = &g_bdev.bdev;
1687 
1688 	g_get_io_channel = true;
1689 
1690 	/* Create channels */
1691 	set_thread(0);
1692 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1693 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1694 	CU_ASSERT(bdev_ch[0]->flags == 0);
1695 
1696 	set_thread(1);
1697 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1698 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1699 	CU_ASSERT(bdev_ch[1]->flags == 0);
1700 
1701 	set_thread(0);
1702 
1703 	/*
1704 	 * Enable QoS: Read/Write IOPS, Read/Write byte,
1705 	 * Read only byte and Write only byte per second
1706 	 * rate limits.
1707 	 * More than 10 I/Os allowed per timeslice.
1708 	 */
1709 	status = -1;
1710 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1711 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
1712 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
1713 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
1714 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1715 	poll_threads();
1716 	CU_ASSERT(status == 0);
1717 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1718 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1719 
1720 	/*
1721 	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
1722 	 * Additional I/O will then be queued.
1723 	 */
1724 	set_thread(0);
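	/* Note: stub_complete_io() with a count of 0 completes all I/O currently outstanding
	 * in the stub (here, the single QoS-allowed I/O submitted in each iteration).
	 */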
1725 	for (i = 0; i < 10; i++) {
1726 		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1727 		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1728 		CU_ASSERT(rc == 0);
1729 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1730 		poll_thread(0);
1731 		stub_complete_io(g_bdev.io_target, 0);
1732 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1733 	}
1734 
1735 	/*
1736 	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
1737 	 * filled already.  We want to test that, when QoS is disabled, these two I/O:
1738 	 *  1) are not aborted
1739 	 *  2) are sent back to their original thread for resubmission
1740 	 */
1741 	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1742 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1743 	CU_ASSERT(rc == 0);
1744 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1745 	set_thread(1);
1746 	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
1747 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
1748 	CU_ASSERT(rc == 0);
1749 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1750 	poll_threads();
1751 
1752 	/*
1753 	 * Disable QoS: Read/Write IOPS, Read/Write byte,
1754 	 * Read only byte rate limits
1755 	 */
1756 	status = -1;
1757 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1758 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
1759 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
1760 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1761 	poll_threads();
1762 	CU_ASSERT(status == 0);
1763 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1764 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1765 
1766 	/* Disable QoS: Write only Byte per second rate limit */
1767 	status = -1;
1768 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
1769 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1770 	poll_threads();
1771 	CU_ASSERT(status == 0);
1772 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1773 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1774 
1775 	/*
1776 	 * All I/O should have been resubmitted back on their original thread.  Complete
1777 	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
1778 	 */
1779 	set_thread(0);
1780 	stub_complete_io(g_bdev.io_target, 0);
1781 	poll_threads();
1782 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1783 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1784 
1785 	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
1786 	set_thread(1);
1787 	stub_complete_io(g_bdev.io_target, 0);
1788 	poll_threads();
1789 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
1790 
1791 	/* Disable QoS again */
1792 	status = -1;
1793 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1794 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1795 	poll_threads();
1796 	CU_ASSERT(status == 0); /* This should succeed */
1797 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1798 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1799 
1800 	/* Enable QoS on thread 0 */
1801 	status = -1;
1802 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1803 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1804 	poll_threads();
1805 	CU_ASSERT(status == 0);
1806 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1807 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1808 
1809 	/* Disable QoS on thread 1 */
1810 	set_thread(1);
1811 	status = -1;
1812 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1813 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1814 	/* Don't poll yet. This should leave the channels with QoS enabled */
1815 	CU_ASSERT(status == -1);
1816 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1817 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1818 
1819 	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
1820 	second_status = 0;
1821 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
1822 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
1823 	poll_threads();
1824 	CU_ASSERT(status == 0); /* The disable should succeed */
1825 	CU_ASSERT(second_status < 0); /* The enable should fail */
1826 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1827 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1828 
1829 	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
1830 	status = -1;
1831 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1832 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1833 	poll_threads();
1834 	CU_ASSERT(status == 0);
1835 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1836 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1837 
1838 	/* Tear down the channels */
1839 	set_thread(0);
1840 	spdk_put_io_channel(io_ch[0]);
1841 	set_thread(1);
1842 	spdk_put_io_channel(io_ch[1]);
1843 	poll_threads();
1844 
1845 	set_thread(0);
1846 	teardown_test();
1847 }
1848 
1849 static void
1850 histogram_status_cb(void *cb_arg, int status)
1851 {
1852 	g_status = status;
1853 }
1854 
1855 static void
1856 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1857 {
1858 	g_status = status;
1859 	g_histogram = histogram;
1860 }
1861 
1862 static void
1863 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1864 		   uint64_t total, uint64_t so_far)
1865 {
1866 	g_count += count;
1867 }
1868 
1869 static void
1870 bdev_histograms_mt(void)
1871 {
1872 	struct spdk_io_channel *ch[2];
1873 	struct spdk_histogram_data *histogram;
1874 	uint8_t buf[4096];
1875 	enum spdk_bdev_io_status status = SPDK_BDEV_IO_STATUS_PENDING;
1876 	int rc;
1877 
1878 
1879 	setup_test();
1880 
1881 	set_thread(0);
1882 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1883 	CU_ASSERT(ch[0] != NULL);
1884 
1885 	set_thread(1);
1886 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1887 	CU_ASSERT(ch[1] != NULL);
1888 
1889 
1890 	/* Enable histogram */
1891 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
1892 	poll_threads();
1893 	CU_ASSERT(g_status == 0);
1894 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1895 
1896 	/* Allocate histogram */
1897 	histogram = spdk_histogram_data_alloc();
1898 
1899 	/* Check if histogram is zeroed */
1900 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1901 	poll_threads();
1902 	CU_ASSERT(g_status == 0);
1903 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1904 
1905 	g_count = 0;
1906 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1907 
1908 	CU_ASSERT(g_count == 0);
1909 
1910 	set_thread(0);
1911 	rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
1912 	CU_ASSERT(rc == 0);
1913 
1914 	spdk_delay_us(10);
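	/* The small mocked delay gives the I/O a non-zero latency so the histogram records a
	 * measurable data point.
	 */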
1915 	stub_complete_io(g_bdev.io_target, 1);
1916 	poll_threads();
1917 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1918 
1919 
1920 	set_thread(1);
1921 	rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
1922 	CU_ASSERT(rc == 0);
1923 
1924 	spdk_delay_us(10);
1925 	stub_complete_io(g_bdev.io_target, 1);
1926 	poll_threads();
1927 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1928 
1929 	set_thread(0);
1930 
1931 	/* Check if histogram gathered data from all I/O channels */
1932 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1933 	poll_threads();
1934 	CU_ASSERT(g_status == 0);
1935 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1936 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1937 
1938 	g_count = 0;
1939 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1940 	CU_ASSERT(g_count == 2);
1941 
1942 	/* Disable histogram */
1943 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1944 	poll_threads();
1945 	CU_ASSERT(g_status == 0);
1946 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1947 
1948 	spdk_histogram_data_free(histogram);
1949 
1950 	/* Tear down the channels */
1951 	set_thread(0);
1952 	spdk_put_io_channel(ch[0]);
1953 	set_thread(1);
1954 	spdk_put_io_channel(ch[1]);
1955 	poll_threads();
1956 	set_thread(0);
1957 	teardown_test();
1958 
1959 }
1960 
1961 struct timeout_io_cb_arg {
1962 	struct iovec iov;
1963 	uint8_t type;
1964 };
1965 
1966 static int
1967 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
1968 {
1969 	struct spdk_bdev_io *bdev_io;
1970 	int n = 0;
1971 
1972 	if (!ch) {
1973 		return -1;
1974 	}
1975 
1976 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
1977 		n++;
1978 	}
1979 
1980 	return n;
1981 }
1982 
1983 static void
1984 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
1985 {
1986 	struct timeout_io_cb_arg *ctx = cb_arg;
1987 
1988 	ctx->type = bdev_io->type;
1989 	ctx->iov.iov_base = bdev_io->iov.iov_base;
1990 	ctx->iov.iov_len = bdev_io->iov.iov_len;
1991 }
1992 
1993 static bool g_io_done;
1994 
1995 static void
1996 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1997 {
1998 	g_io_done = true;
1999 	spdk_bdev_free_io(bdev_io);
2000 }
2001 
2002 static void
2003 bdev_set_io_timeout_mt(void)
2004 {
2005 	struct spdk_io_channel *ch[3];
2006 	struct spdk_bdev_channel *bdev_ch[3];
2007 	struct timeout_io_cb_arg cb_arg;
2008 
2009 	setup_test();
2010 
2011 	g_bdev.bdev.optimal_io_boundary = 16;
2012 	g_bdev.bdev.split_on_optimal_io_boundary = true;
2013 
2014 	set_thread(0);
2015 	ch[0] = spdk_bdev_get_io_channel(g_desc);
2016 	CU_ASSERT(ch[0] != NULL);
2017 
2018 	set_thread(1);
2019 	ch[1] = spdk_bdev_get_io_channel(g_desc);
2020 	CU_ASSERT(ch[1] != NULL);
2021 
2022 	set_thread(2);
2023 	ch[2] = spdk_bdev_get_io_channel(g_desc);
2024 	CU_ASSERT(ch[2] != NULL);
2025 
2026 	/* Multi-thread mode
2027 	 * 1, Check the poller was registered successfully
2028 	 * 2, Check the timeout I/O and ensure it was the I/O submitted by the user
2029 	 * 3, Check that the I/O link in the bdev_ch works correctly.
2030 	 * 4, Close the desc and put the io channel while the timeout poller is polling
2031 	 */
2032 
2033 	/* In desc thread set the timeout */
2034 	set_thread(0);
2035 	CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2036 	CU_ASSERT(g_desc->io_timeout_poller != NULL);
2037 	CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
2038 	CU_ASSERT(g_desc->cb_arg == &cb_arg);
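	/* The timeout poller now runs on the desc's thread (thread 0).  Each timed-out I/O is
	 * expected to be reported through bdev_channel_io_timeout_cb() on the thread that
	 * submitted it.
	 */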
2039 
2040 	/* check the IO submitted list and timeout handler */
2041 	/* Check the submitted I/O list and the timeout handler */
2042 	bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
2043 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
2044 
2045 	set_thread(1);
2046 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2047 	bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
2048 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
2049 
2050 	/* Now test that a single-vector command is split correctly.
2051 	 * Offset 14, length 8, payload 0xF000
2052 	 *  Child - Offset 14, length 2, payload 0xF000
2053 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
2054 	 *
2055 	 * Set up the expected values before calling spdk_bdev_read_blocks
2056 	 */
2057 	set_thread(2);
2058 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
2059 	bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
2060 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
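	/* Three entries are expected on this channel's io_submitted list: the parent I/O plus
	 * the two children created by splitting at the optimal_io_boundary of 16 blocks.
	 */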
2061 
2062 	set_thread(0);
2063 	memset(&cb_arg, 0, sizeof(cb_arg));
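	/* Advance the mocked clock by the equivalent of 3 seconds, still below the 5-second
	 * timeout, so no I/O should be reported as timed out yet.
	 */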
2064 	spdk_delay_us(3 * spdk_get_ticks_hz());
2065 	poll_threads();
2066 	CU_ASSERT(cb_arg.type == 0);
2067 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2068 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2069 
2070 	/* Now the elapsed time exceeds the 5-second timeout limit */
2071 	spdk_delay_us(3 * spdk_get_ticks_hz());
2072 	poll_thread(0);
2073 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2074 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2075 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2076 	stub_complete_io(g_bdev.io_target, 1);
2077 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
2078 
2079 	memset(&cb_arg, 0, sizeof(cb_arg));
2080 	set_thread(1);
2081 	poll_thread(1);
2082 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2083 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
2084 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2085 	stub_complete_io(g_bdev.io_target, 1);
2086 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
2087 
2088 	memset(&cb_arg, 0, sizeof(cb_arg));
2089 	set_thread(2);
2090 	poll_thread(2);
2091 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2092 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
2093 	CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
2094 	stub_complete_io(g_bdev.io_target, 1);
2095 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
2096 	stub_complete_io(g_bdev.io_target, 1);
2097 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
2098 
2099 	/* Poll thread 0 to run poll_timeout_done(), which completes this round of the timeout poller */
2100 	set_thread(0);
2101 	poll_thread(0);
2102 	CU_ASSERT(g_desc->refs == 0);
2103 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2104 	set_thread(1);
2105 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
2106 	set_thread(2);
2107 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
2108 
2109 	/* Trigger the timeout poller to run again; desc->refs is incremented while it runs.
2110 	 * On thread 0 we destroy the io channel before the timeout poller runs, so the
2111 	 * timeout callback is not called on thread 0.
2112 	 */
2113 	spdk_delay_us(6 * spdk_get_ticks_hz());
2114 	memset(&cb_arg, 0, sizeof(cb_arg));
2115 	set_thread(0);
2116 	stub_complete_io(g_bdev.io_target, 1);
2117 	spdk_put_io_channel(ch[0]);
2118 	poll_thread(0);
2119 	CU_ASSERT(g_desc->refs == 1);
2120 	CU_ASSERT(cb_arg.type == 0);
2121 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2122 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2123 
2124 	/* On thread 1 the timeout poller runs first and then we destroy the io channel,
2125 	 * so the timeout callback is called on thread 1.
2126 	 */
2127 	memset(&cb_arg, 0, sizeof(cb_arg));
2128 	set_thread(1);
2129 	poll_thread(1);
2130 	stub_complete_io(g_bdev.io_target, 1);
2131 	spdk_put_io_channel(ch[1]);
2132 	poll_thread(1);
2133 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2134 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2135 	CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
2136 
2137 	/* Close the desc.
2138 	 * This unregisters the timeout poller first and then decrements desc->refs,
2139 	 * but the refcount is not zero yet so the desc is not freed.
2140 	 */
2141 	set_thread(0);
2142 	spdk_bdev_close(g_desc);
2143 	CU_ASSERT(g_desc->refs == 1);
2144 	CU_ASSERT(g_desc->io_timeout_poller == NULL);
2145 
2146 	/* The timeout poller runs on thread 2 and then we destroy the io channel.
2147 	 * The desc is already closed, so the timeout poller exits immediately and the
2148 	 * timeout callback is not called on thread 2.
2149 	 */
2150 	memset(&cb_arg, 0, sizeof(cb_arg));
2151 	set_thread(2);
2152 	poll_thread(2);
2153 	stub_complete_io(g_bdev.io_target, 1);
2154 	spdk_put_io_channel(ch[2]);
2155 	poll_thread(2);
2156 	CU_ASSERT(cb_arg.type == 0);
2157 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2158 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2159 
2160 	set_thread(0);
2161 	poll_thread(0);
2162 	g_teardown_done = false;
2163 	unregister_bdev(&g_bdev);
2164 	spdk_io_device_unregister(&g_io_device, NULL);
2165 	spdk_bdev_finish(finish_cb, NULL);
2166 	spdk_iobuf_finish(finish_cb, NULL);
2167 	poll_threads();
2168 	memset(&g_bdev, 0, sizeof(g_bdev));
2169 	CU_ASSERT(g_teardown_done == true);
2170 	g_teardown_done = false;
2171 	free_threads();
2172 	free_cores();
2173 }
2174 
2175 static bool g_io_done2;
2176 static bool g_lock_lba_range_done;
2177 static bool g_unlock_lba_range_done;
2178 
2179 static void
2180 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2181 {
2182 	g_io_done2 = true;
2183 	spdk_bdev_free_io(bdev_io);
2184 }
2185 
2186 static void
2187 lock_lba_range_done(struct lba_range *range, void *ctx, int status)
2188 {
2189 	g_lock_lba_range_done = true;
2190 }
2191 
2192 static void
2193 unlock_lba_range_done(struct lba_range *range, void *ctx, int status)
2194 {
2195 	g_unlock_lba_range_done = true;
2196 }
2197 
2198 static uint32_t
2199 stub_channel_outstanding_cnt(void *io_target)
2200 {
2201 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
2202 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
2203 	uint32_t outstanding_cnt;
2204 
2205 	outstanding_cnt = ch->outstanding_cnt;
2206 
2207 	spdk_put_io_channel(_ch);
2208 	return outstanding_cnt;
2209 }
2210 
2211 static void
2212 lock_lba_range_then_submit_io(void)
2213 {
2214 	struct spdk_bdev_desc *desc = NULL;
2215 	void *io_target;
2216 	struct spdk_io_channel *io_ch[3];
2217 	struct spdk_bdev_channel *bdev_ch[3];
2218 	struct lba_range *range;
2219 	char buf[4096];
2220 	int ctx0, ctx1, ctx2;
2221 	int rc;
2222 
2223 	setup_test();
2224 
2225 	io_target = g_bdev.io_target;
2226 	desc = g_desc;
2227 
2228 	set_thread(0);
2229 	io_ch[0] = spdk_bdev_get_io_channel(desc);
2230 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
2231 	CU_ASSERT(io_ch[0] != NULL);
2232 
2233 	set_thread(1);
2234 	io_ch[1] = spdk_bdev_get_io_channel(desc);
2235 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
2236 	CU_ASSERT(io_ch[1] != NULL);
2237 
2238 	set_thread(0);
2239 	g_lock_lba_range_done = false;
2240 	rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
2241 	CU_ASSERT(rc == 0);
2242 	poll_threads();
2243 
2244 	/* The lock should immediately become valid, since there are no outstanding
2245 	 * write I/O.
2246 	 */
2247 	CU_ASSERT(g_lock_lba_range_done == true);
2248 	range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
2249 	SPDK_CU_ASSERT_FATAL(range != NULL);
2250 	CU_ASSERT(range->offset == 20);
2251 	CU_ASSERT(range->length == 10);
2252 	CU_ASSERT(range->owner_ch == bdev_ch[0]);
2253 
2254 	g_io_done = false;
2255 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2256 	rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2257 	CU_ASSERT(rc == 0);
2258 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2259 
2260 	stub_complete_io(io_target, 1);
2261 	poll_threads();
2262 	CU_ASSERT(g_io_done == true);
2263 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2264 
2265 	/* Try a write I/O.  This should actually be allowed to execute, since the channel
2266 	 * holding the lock is submitting the write I/O.
2267 	 */
2268 	g_io_done = false;
2269 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2270 	rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2271 	CU_ASSERT(rc == 0);
2272 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2273 
2274 	stub_complete_io(io_target, 1);
2275 	poll_threads();
2276 	CU_ASSERT(g_io_done == true);
2277 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2278 
2279 	/* Try a write I/O.  This should get queued in the io_locked tailq. */
2280 	set_thread(1);
2281 	g_io_done = false;
2282 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2283 	rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
2284 	CU_ASSERT(rc == 0);
2285 	poll_threads();
2286 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2287 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2288 	CU_ASSERT(g_io_done == false);
2289 
2290 	/* Try to unlock the lba range using thread 1's io_ch.  This should fail. */
2291 	rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
2292 	CU_ASSERT(rc == -EINVAL);
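	/* Only the channel that acquired the lock (io_ch[0]) may release it, so unlocking from
	 * io_ch[1] is rejected with -EINVAL.
	 */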
2293 
2294 	/* Now create a new channel and submit a write I/O with it.  This should also be queued.
2295 	 * The new channel should inherit the active locks from the bdev's internal list.
2296 	 */
2297 	set_thread(2);
2298 	io_ch[2] = spdk_bdev_get_io_channel(desc);
2299 	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
2300 	CU_ASSERT(io_ch[2] != NULL);
2301 
2302 	g_io_done2 = false;
2303 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2304 	rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
2305 	CU_ASSERT(rc == 0);
2306 	poll_threads();
2307 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2308 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2309 	CU_ASSERT(g_io_done2 == false);
2310 
2311 	set_thread(0);
2312 	rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
2313 	CU_ASSERT(rc == 0);
2314 	poll_threads();
2315 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
2316 
2317 	/* The LBA range is unlocked, so the write IOs should now have started execution. */
2318 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2319 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2320 
2321 	set_thread(1);
2322 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2323 	stub_complete_io(io_target, 1);
2324 	set_thread(2);
2325 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2326 	stub_complete_io(io_target, 1);
2327 
2328 	poll_threads();
2329 	CU_ASSERT(g_io_done == true);
2330 	CU_ASSERT(g_io_done2 == true);
2331 
2332 	/* Tear down the channels */
2333 	set_thread(0);
2334 	spdk_put_io_channel(io_ch[0]);
2335 	set_thread(1);
2336 	spdk_put_io_channel(io_ch[1]);
2337 	set_thread(2);
2338 	spdk_put_io_channel(io_ch[2]);
2339 	poll_threads();
2340 	set_thread(0);
2341 	teardown_test();
2342 }
2343 
2344 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel().
2345  * spdk_bdev_unregister() calls spdk_io_device_unregister() at the end. However,
2346  * spdk_io_device_unregister() fails if it is called while spdk_for_each_channel() is still
2347  * executing. Hence, in this case, spdk_io_device_unregister() is deferred until
2348  * spdk_bdev_reset() completes. Test this behavior.
2349  */
2350 static void
2351 unregister_during_reset(void)
2352 {
2353 	struct spdk_io_channel *io_ch[2];
2354 	bool done_reset = false, done_unregister = false;
2355 	int rc;
2356 
2357 	setup_test();
2358 	set_thread(0);
2359 
2360 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
2361 	SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL);
2362 
2363 	set_thread(1);
2364 
2365 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
2366 	SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL);
2367 
2368 	set_thread(0);
2369 
2370 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2371 
2372 	rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset);
2373 	CU_ASSERT(rc == 0);
2374 
2375 	set_thread(0);
2376 
2377 	poll_thread_times(0, 1);
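	/* Poll thread 0 only once so that the reset's spdk_for_each_channel() iteration is
	 * still in progress when the bdev is unregistered below.
	 */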
2378 
2379 	spdk_bdev_close(g_desc);
2380 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister);
2381 
2382 	CU_ASSERT(done_reset == false);
2383 	CU_ASSERT(done_unregister == false);
2384 
2385 	poll_threads();
2386 
2387 	stub_complete_io(g_bdev.io_target, 0);
2388 
2389 	poll_threads();
2390 
2391 	CU_ASSERT(done_reset == true);
2392 	CU_ASSERT(done_unregister == false);
2393 
2394 	spdk_put_io_channel(io_ch[0]);
2395 
2396 	set_thread(1);
2397 
2398 	spdk_put_io_channel(io_ch[1]);
2399 
2400 	poll_threads();
2401 
2402 	CU_ASSERT(done_unregister == true);
2403 
2404 	/* Restore the original g_bdev so that we can use teardown_test(). */
2405 	set_thread(0);
2406 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
2407 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2408 	teardown_test();
2409 }
2410 
2411 static void
2412 bdev_init_wt_cb(void *done, int rc)
2413 {
2414 }
2415 
2416 static int
2417 wrong_thread_setup(void)
2418 {
2419 	allocate_cores(1);
2420 	allocate_threads(2);
2421 	set_thread(0);
2422 
2423 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
2424 				ut_accel_ch_destroy_cb, 0, NULL);
2425 	spdk_bdev_initialize(bdev_init_wt_cb, NULL);
2426 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
2427 				sizeof(struct ut_bdev_channel), NULL);
2428 
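	/* Leave thread 1 as the current thread so that each test in this suite starts on a
	 * thread other than the app thread on which the bdev layer was initialized.
	 */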
2429 	set_thread(1);
2430 
2431 	return 0;
2432 }
2433 
2434 static int
2435 wrong_thread_teardown(void)
2436 {
2437 	int rc = 0;
2438 
2439 	set_thread(0);
2440 
2441 	g_teardown_done = false;
2442 	spdk_io_device_unregister(&g_io_device, NULL);
2443 	spdk_bdev_finish(finish_cb, NULL);
2444 	poll_threads();
2445 	memset(&g_bdev, 0, sizeof(g_bdev));
2446 	if (!g_teardown_done) {
2447 		fprintf(stderr, "%s:%d %s: teardown not done\n", __FILE__, __LINE__, __func__);
2448 		rc = -1;
2449 	}
2450 	g_teardown_done = false;
2451 
2452 	spdk_io_device_unregister(&g_accel_io_device, NULL);
2453 	free_threads();
2454 	free_cores();
2455 
2456 	return rc;
2457 }
2458 
2459 static void
2460 _bdev_unregistered_wt(void *ctx, int rc)
2461 {
2462 	struct spdk_thread **threadp = ctx;
2463 
2464 	*threadp = spdk_get_thread();
2465 }
2466 
2467 static void
2468 spdk_bdev_register_wt(void)
2469 {
2470 	struct spdk_bdev bdev = { 0 };
2471 	int rc;
2472 	struct spdk_thread *unreg_thread;
2473 
2474 	bdev.name = "wt_bdev";
2475 	bdev.fn_table = &fn_table;
2476 	bdev.module = &bdev_ut_if;
2477 	bdev.blocklen = 4096;
2478 	bdev.blockcnt = 1024;
2479 
2480 	/* Can register only on app thread */
2481 	rc = spdk_bdev_register(&bdev);
2482 	CU_ASSERT(rc == -EINVAL);
2483 
2484 	/* Can unregister on any thread */
2485 	set_thread(0);
2486 	rc = spdk_bdev_register(&bdev);
2487 	CU_ASSERT(rc == 0);
2488 	set_thread(1);
2489 	unreg_thread = NULL;
2490 	spdk_bdev_unregister(&bdev, _bdev_unregistered_wt, &unreg_thread);
2491 	poll_threads();
2492 	CU_ASSERT(unreg_thread == spdk_get_thread());
2493 
2494 	/* Can unregister by name on any thread */
2495 	set_thread(0);
2496 	rc = spdk_bdev_register(&bdev);
2497 	CU_ASSERT(rc == 0);
2498 	set_thread(1);
2499 	unreg_thread = NULL;
2500 	rc = spdk_bdev_unregister_by_name(bdev.name, bdev.module, _bdev_unregistered_wt,
2501 					  &unreg_thread);
2502 	CU_ASSERT(rc == 0);
2503 	poll_threads();
2504 	CU_ASSERT(unreg_thread == spdk_get_thread());
2505 }
2506 
2507 static void
2508 wait_for_examine_cb(void *arg)
2509 {
2510 	struct spdk_thread **thread = arg;
2511 
2512 	*thread = spdk_get_thread();
2513 }
2514 
2515 static void
2516 spdk_bdev_examine_wt(void)
2517 {
2518 	int rc;
2519 	bool save_auto_examine = g_bdev_opts.bdev_auto_examine;
2520 	struct spdk_thread *thread;
2521 
2522 	g_bdev_opts.bdev_auto_examine = false;
2523 
2524 	set_thread(0);
2525 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2526 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2527 	set_thread(1);
2528 
2529 	/* Can examine only on the app thread */
2530 	rc = spdk_bdev_examine("ut_bdev_wt");
2531 	CU_ASSERT(rc == -EINVAL);
2532 	unregister_bdev(&g_bdev);
2533 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2534 
2535 	/* Can wait for examine on app thread, callback called on app thread. */
2536 	set_thread(0);
2537 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2538 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2539 	thread = NULL;
2540 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2541 	CU_ASSERT(rc == 0);
2542 	poll_threads();
2543 	CU_ASSERT(thread == spdk_get_thread());
2544 	unregister_bdev(&g_bdev);
2545 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2546 
2547 	/* Can wait for examine on non-app thread, callback called on same thread. */
2548 	set_thread(0);
2549 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2550 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
	set_thread(1);
	thread = NULL;
2552 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2553 	CU_ASSERT(rc == 0);
2554 	poll_threads();
2555 	CU_ASSERT(thread == spdk_get_thread());
2556 	unregister_bdev(&g_bdev);
2557 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2558 
2559 	unregister_bdev(&g_bdev);
2560 	g_bdev_opts.bdev_auto_examine = save_auto_examine;
2561 }
2562 
2563 static void
2564 event_notify_and_close(void)
2565 {
2566 	int resize_notify_count = 0;
2567 	struct spdk_bdev_desc *desc = NULL;
2568 	struct spdk_bdev *bdev;
2569 	int rc;
2570 
2571 	setup_test();
2572 	set_thread(0);
2573 
2574 	/* setup_test() automatically opens the bdev, but this test needs to do
2575 	 * that in a different way. */
2576 	spdk_bdev_close(g_desc);
2577 	poll_threads();
2578 
2579 	set_thread(1);
2580 
2581 	rc = spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &resize_notify_count, &desc);
2582 	CU_ASSERT(rc == 0);
2583 	SPDK_CU_ASSERT_FATAL(desc != NULL);
2584 
2585 	bdev = spdk_bdev_desc_get_bdev(desc);
2586 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2587 
2588 	/* Test the normal case in which a resize event is notified. */
2589 	set_thread(0);
2590 
2591 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 2);
2592 	CU_ASSERT(rc == 0);
2593 	CU_ASSERT(bdev->blockcnt == 1024 * 2);
2594 	CU_ASSERT(desc->refs == 1);
2595 	CU_ASSERT(resize_notify_count == 0);
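	/* The resize event is delivered asynchronously via a message to the desc's thread, so
	 * the desc holds an extra reference and the callback does not run until poll_threads()
	 * below.
	 */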
2596 
2597 	poll_threads();
2598 
2599 	CU_ASSERT(desc->refs == 0);
2600 	CU_ASSERT(resize_notify_count == 1);
2601 
2602 	/* Test a more complex case: if the bdev is closed after two event_notify messages are sent,
2603 	 * both event_notify messages are discarded and the desc is freed.
2604 	 */
2605 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 3);
2606 	CU_ASSERT(rc == 0);
2607 	CU_ASSERT(bdev->blockcnt == 1024 * 3);
2608 	CU_ASSERT(desc->refs == 1);
2609 	CU_ASSERT(resize_notify_count == 1);
2610 
2611 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 4);
2612 	CU_ASSERT(rc == 0);
2613 	CU_ASSERT(bdev->blockcnt == 1024 * 4);
2614 	CU_ASSERT(desc->refs == 2);
2615 	CU_ASSERT(resize_notify_count == 1);
2616 
2617 	set_thread(1);
2618 
2619 	spdk_bdev_close(desc);
2620 	CU_ASSERT(desc->closed == true);
2621 	CU_ASSERT(desc->refs == 2);
2622 	CU_ASSERT(resize_notify_count == 1);
2623 
2624 	poll_threads();
2625 
2626 	CU_ASSERT(resize_notify_count == 1);
2627 
2628 	set_thread(0);
2629 
2630 	/* Restore g_desc. Then, we can execute teardown_test(). */
2631 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2632 	teardown_test();
2633 }
2634 
2635 int
2636 main(int argc, char **argv)
2637 {
2638 	CU_pSuite	suite = NULL;
2639 	CU_pSuite	suite_wt = NULL;
2640 	unsigned int	num_failures;
2641 
2642 	CU_initialize_registry();
2643 
2644 	suite = CU_add_suite("bdev", NULL, NULL);
2645 	suite_wt = CU_add_suite("bdev_wrong_thread", wrong_thread_setup, wrong_thread_teardown);
2646 
2647 	CU_ADD_TEST(suite, basic);
2648 	CU_ADD_TEST(suite, unregister_and_close);
2649 	CU_ADD_TEST(suite, unregister_and_close_different_threads);
2650 	CU_ADD_TEST(suite, basic_qos);
2651 	CU_ADD_TEST(suite, put_channel_during_reset);
2652 	CU_ADD_TEST(suite, aborted_reset);
2653 	CU_ADD_TEST(suite, aborted_reset_no_outstanding_io);
2654 	CU_ADD_TEST(suite, io_during_reset);
2655 	CU_ADD_TEST(suite, reset_completions);
2656 	CU_ADD_TEST(suite, io_during_qos_queue);
2657 	CU_ADD_TEST(suite, io_during_qos_reset);
2658 	CU_ADD_TEST(suite, enomem);
2659 	CU_ADD_TEST(suite, enomem_multi_bdev);
2660 	CU_ADD_TEST(suite, enomem_multi_bdev_unregister);
2661 	CU_ADD_TEST(suite, enomem_multi_io_target);
2662 	CU_ADD_TEST(suite, qos_dynamic_enable);
2663 	CU_ADD_TEST(suite, bdev_histograms_mt);
2664 	CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
2665 	CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
2666 	CU_ADD_TEST(suite, unregister_during_reset);
2667 	CU_ADD_TEST(suite_wt, spdk_bdev_register_wt);
2668 	CU_ADD_TEST(suite_wt, spdk_bdev_examine_wt);
2669 	CU_ADD_TEST(suite, event_notify_and_close);
2670 
2671 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2672 	CU_cleanup_registry();
2673 	return num_failures;
2674 }
2675