1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk_internal/cunit.h"
8 
9 #include "common/lib/ut_multithread.c"
10 #include "unit/lib/json_mock.c"
11 
12 #include "spdk/config.h"
13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
14 #undef SPDK_CONFIG_VTUNE
15 
16 #include "bdev/bdev.c"
17 
18 #define BDEV_UT_NUM_THREADS 3
19 
20 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
21 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
22 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk,
23 		int *asc, int *ascq));
24 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
25 	    "test_domain");
26 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
27 	    (struct spdk_memory_domain *domain), 0);
28 DEFINE_STUB_V(spdk_accel_sequence_finish,
29 	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
30 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
31 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));
32 DEFINE_STUB(spdk_accel_append_copy, int,
33 	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
34 	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
35 	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
36 	     void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
37 DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);
38 
39 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
40 int
41 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
42 			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
43 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
44 {
45 	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
46 
47 	cpl_cb(cpl_cb_arg, 0);
48 	return 0;
49 }
50 
51 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
52 int
53 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
54 			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
55 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
56 {
57 	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
58 
59 	cpl_cb(cpl_cb_arg, 0);
60 	return 0;
61 }
62 
63 static int g_accel_io_device;
64 
65 struct spdk_io_channel *
66 spdk_accel_get_io_channel(void)
67 {
68 	return spdk_get_io_channel(&g_accel_io_device);
69 }
70 
71 struct ut_bdev {
72 	struct spdk_bdev	bdev;
73 	void			*io_target;
74 };
75 
76 struct ut_bdev_channel {
77 	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
78 	uint32_t			outstanding_cnt;
79 	uint32_t			avail_cnt;
80 };
81 
82 int g_io_device;
83 struct ut_bdev g_bdev;
84 struct spdk_bdev_desc *g_desc;
85 bool g_teardown_done = false;
86 bool g_get_io_channel = true;
87 bool g_create_ch = true;
88 bool g_init_complete_called = false;
89 bool g_fini_start_called = true;
90 int g_status = 0;
91 int g_count = 0;
92 struct spdk_histogram_data *g_histogram = NULL;
93 
94 static int
95 ut_accel_ch_create_cb(void *io_device, void *ctx)
96 {
97 	return 0;
98 }
99 
100 static void
101 ut_accel_ch_destroy_cb(void *io_device, void *ctx)
102 {
103 }
104 
105 static int
106 stub_create_ch(void *io_device, void *ctx_buf)
107 {
108 	struct ut_bdev_channel *ch = ctx_buf;
109 
110 	if (g_create_ch == false) {
111 		return -1;
112 	}
113 
114 	TAILQ_INIT(&ch->outstanding_io);
115 	ch->outstanding_cnt = 0;
116 	/*
117 	 * When avail gets to 0, the submit_request function will return ENOMEM.
118 	 *  Most tests do not want ENOMEM to occur, so by default set this to a
119 	 *  big value that won't get hit.  The ENOMEM tests can then override this
120 	 *  value to something much smaller to induce ENOMEM conditions.
121 	 */
122 	ch->avail_cnt = 2048;
123 	return 0;
124 }
125 
126 static void
127 stub_destroy_ch(void *io_device, void *ctx_buf)
128 {
129 }
130 
131 static struct spdk_io_channel *
132 stub_get_io_channel(void *ctx)
133 {
134 	struct ut_bdev *ut_bdev = ctx;
135 
136 	if (g_get_io_channel == true) {
137 		return spdk_get_io_channel(ut_bdev->io_target);
138 	} else {
139 		return NULL;
140 	}
141 }
142 
143 static int
144 stub_destruct(void *ctx)
145 {
146 	return 0;
147 }
148 
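/*
 * Stub module submit path: a RESET first aborts everything queued on the channel,
 * an ABORT completes the matching queued I/O, and remaining requests (including the
 * reset itself) are queued until stub_complete_io() is called, or completed with
 * NOMEM once avail_cnt is exhausted.
 */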
149 static void
150 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
151 {
152 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
153 	struct spdk_bdev_io *io;
154 
155 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
156 		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
157 			io = TAILQ_FIRST(&ch->outstanding_io);
158 			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
159 			ch->outstanding_cnt--;
160 			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
161 			ch->avail_cnt++;
162 		}
163 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
164 		TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
165 			if (io == bdev_io->u.abort.bio_to_abort) {
166 				TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
167 				ch->outstanding_cnt--;
168 				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
169 				ch->avail_cnt++;
170 
171 				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
172 				return;
173 			}
174 		}
175 
176 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
177 		return;
178 	}
179 
180 	if (ch->avail_cnt > 0) {
181 		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
182 		ch->outstanding_cnt++;
183 		ch->avail_cnt--;
184 	} else {
185 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
186 	}
187 }
188 
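/*
 * Complete up to num_to_complete I/O queued on the given io_target with SUCCESS
 * status; passing 0 completes everything.  Returns the number of I/O completed.
 */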
189 static uint32_t
190 stub_complete_io(void *io_target, uint32_t num_to_complete)
191 {
192 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
193 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
194 	struct spdk_bdev_io *io;
195 	bool complete_all = (num_to_complete == 0);
196 	uint32_t num_completed = 0;
197 
198 	while (complete_all || num_completed < num_to_complete) {
199 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
200 			break;
201 		}
202 		io = TAILQ_FIRST(&ch->outstanding_io);
203 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
204 		ch->outstanding_cnt--;
205 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
206 		ch->avail_cnt++;
207 		num_completed++;
208 	}
209 	spdk_put_io_channel(_ch);
210 	return num_completed;
211 }
212 
213 static bool
214 stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
215 {
216 	return true;
217 }
218 
219 static struct spdk_bdev_fn_table fn_table = {
220 	.get_io_channel =	stub_get_io_channel,
221 	.destruct =		stub_destruct,
222 	.submit_request =	stub_submit_request,
223 	.io_type_supported =	stub_io_type_supported,
224 };
225 
226 struct spdk_bdev_module bdev_ut_if;
227 
228 static int
229 module_init(void)
230 {
231 	spdk_bdev_module_init_done(&bdev_ut_if);
232 	return 0;
233 }
234 
235 static void
236 module_fini(void)
237 {
238 }
239 
240 static void
241 init_complete(void)
242 {
243 	g_init_complete_called = true;
244 }
245 
246 static void
247 fini_start(void)
248 {
249 	g_fini_start_called = true;
250 }
251 
252 struct spdk_bdev_module bdev_ut_if = {
253 	.name = "bdev_ut",
254 	.module_init = module_init,
255 	.module_fini = module_fini,
256 	.async_init = true,
257 	.init_complete = init_complete,
258 	.fini_start = fini_start,
259 };
260 
261 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
262 
263 static void
264 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
265 {
266 	memset(ut_bdev, 0, sizeof(*ut_bdev));
267 
268 	ut_bdev->io_target = io_target;
269 	ut_bdev->bdev.ctxt = ut_bdev;
270 	ut_bdev->bdev.name = name;
271 	ut_bdev->bdev.fn_table = &fn_table;
272 	ut_bdev->bdev.module = &bdev_ut_if;
273 	ut_bdev->bdev.blocklen = 4096;
274 	ut_bdev->bdev.blockcnt = 1024;
275 
276 	spdk_bdev_register(&ut_bdev->bdev);
277 }
278 
279 static void
280 unregister_bdev(struct ut_bdev *ut_bdev)
281 {
282 	/* Handle any deferred messages. */
283 	poll_threads();
284 	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
285 	/* Handle the async bdev unregister. */
286 	poll_threads();
287 }
288 
289 static void
290 bdev_init_cb(void *done, int rc)
291 {
292 	CU_ASSERT(rc == 0);
293 	*(bool *)done = true;
294 }
295 
296 static void
297 _bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
298 	       void *event_ctx)
299 {
300 	switch (type) {
301 	case SPDK_BDEV_EVENT_REMOVE:
302 		if (event_ctx != NULL) {
303 			*(bool *)event_ctx = true;
304 		}
305 		break;
306 	case SPDK_BDEV_EVENT_RESIZE:
307 		if (event_ctx != NULL) {
308 			*(int *)event_ctx += 1;
309 		}
310 		break;
311 	default:
312 		CU_ASSERT(false);
313 		break;
314 	}
315 }
316 
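/*
 * Common test setup: allocate the unit-test cores and threads, initialize the iobuf
 * and bdev layers, register the stub bdev "ut_bdev" and open a descriptor to it in g_desc.
 */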
317 static void
318 setup_test(void)
319 {
320 	bool done = false;
321 	int rc;
322 
323 	allocate_cores(BDEV_UT_NUM_THREADS);
324 	allocate_threads(BDEV_UT_NUM_THREADS);
325 	set_thread(0);
326 
327 	rc = spdk_iobuf_initialize();
328 	CU_ASSERT(rc == 0);
329 	spdk_bdev_initialize(bdev_init_cb, &done);
330 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
331 				sizeof(struct ut_bdev_channel), NULL);
332 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
333 				ut_accel_ch_destroy_cb, 0, NULL);
334 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
335 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
336 }
337 
338 static void
339 finish_cb(void *cb_arg)
340 {
341 	g_teardown_done = true;
342 }
343 
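/*
 * Common test teardown: close the descriptor, unregister the stub bdev and the
 * io_devices, shut down the bdev and iobuf layers, and free the test threads.
 */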
344 static void
345 teardown_test(void)
346 {
347 	set_thread(0);
348 	g_teardown_done = false;
349 	spdk_bdev_close(g_desc);
350 	g_desc = NULL;
351 	unregister_bdev(&g_bdev);
352 	spdk_io_device_unregister(&g_io_device, NULL);
353 	spdk_bdev_finish(finish_cb, NULL);
354 	spdk_io_device_unregister(&g_accel_io_device, NULL);
355 	spdk_iobuf_finish(finish_cb, NULL);
356 	poll_threads();
357 	memset(&g_bdev, 0, sizeof(g_bdev));
358 	CU_ASSERT(g_teardown_done == true);
359 	g_teardown_done = false;
360 	free_threads();
361 	free_cores();
362 }
363 
364 static uint32_t
365 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
366 {
367 	struct spdk_bdev_io *io;
368 	uint32_t cnt = 0;
369 
370 	TAILQ_FOREACH(io, tailq, internal.link) {
371 		cnt++;
372 	}
373 
374 	return cnt;
375 }
376 
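/*
 * Exercise the basic spdk_bdev_get_io_channel() paths: failure when the module
 * returns no channel, failure when channel creation fails, and success otherwise.
 * Also checks that the module's init_complete and fini_start callbacks fire.
 */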
377 static void
378 basic(void)
379 {
380 	g_init_complete_called = false;
381 	setup_test();
382 	CU_ASSERT(g_init_complete_called == true);
383 
384 	set_thread(0);
385 
386 	g_get_io_channel = false;
387 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
388 	CU_ASSERT(g_ut_threads[0].ch == NULL);
389 
390 	g_get_io_channel = true;
391 	g_create_ch = false;
392 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
393 	CU_ASSERT(g_ut_threads[0].ch == NULL);
394 
395 	g_get_io_channel = true;
396 	g_create_ch = true;
397 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
398 	CU_ASSERT(g_ut_threads[0].ch != NULL);
399 	spdk_put_io_channel(g_ut_threads[0].ch);
400 
401 	g_fini_start_called = false;
402 	teardown_test();
403 	CU_ASSERT(g_fini_start_called == true);
404 }
405 
406 static void
407 _bdev_unregistered(void *done, int rc)
408 {
409 	CU_ASSERT(rc == 0);
410 	*(bool *)done = true;
411 }
412 
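/*
 * Verify that spdk_bdev_unregister() does not complete while a descriptor is still
 * open, and that the remove notification is skipped when the descriptor is closed
 * before the unregister is processed.
 */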
413 static void
414 unregister_and_close(void)
415 {
416 	bool done, remove_notify;
417 	struct spdk_bdev_desc *desc = NULL;
418 
419 	setup_test();
420 	set_thread(0);
421 
422 	/* setup_test() automatically opens the bdev,
423 	 * but this test needs to do that in a different
424 	 * way. */
425 	spdk_bdev_close(g_desc);
426 	poll_threads();
427 
428 	/* Try hotremoving a bdev with descriptors which don't provide
429 	 * any context to the notification callback */
430 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
431 	SPDK_CU_ASSERT_FATAL(desc != NULL);
432 
433 	/* There is an open descriptor on the device. Unregister it,
434 	 * which can't proceed until the descriptor is closed. */
435 	done = false;
436 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
437 
438 	/* Poll the threads to allow all events to be processed */
439 	poll_threads();
440 
441 	/* Make sure the bdev was not unregistered. We still have a
442 	 * descriptor open */
443 	CU_ASSERT(done == false);
444 
445 	spdk_bdev_close(desc);
446 	poll_threads();
447 	desc = NULL;
448 
449 	/* The unregister should have completed */
450 	CU_ASSERT(done == true);
451 
452 
453 	/* Register the bdev again */
454 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
455 
456 	remove_notify = false;
457 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
458 	SPDK_CU_ASSERT_FATAL(desc != NULL);
459 	CU_ASSERT(remove_notify == false);
460 
461 	/* There is an open descriptor on the device. Unregister it,
462 	 * which can't proceed until the descriptor is closed. */
463 	done = false;
464 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
465 	/* No polling has occurred, so neither of these should execute */
466 	CU_ASSERT(remove_notify == false);
467 	CU_ASSERT(done == false);
468 
469 	/* Prior to the unregister completing, close the descriptor */
470 	spdk_bdev_close(desc);
471 
472 	/* Poll the threads to allow all events to be processed */
473 	poll_threads();
474 
475 	/* Remove notify should not have been called because the
476 	 * descriptor is already closed. */
477 	CU_ASSERT(remove_notify == false);
478 
479 	/* The unregister should have completed */
480 	CU_ASSERT(done == true);
481 
482 	/* Restore the original g_bdev so that we can use teardown_test(). */
483 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
484 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
485 	teardown_test();
486 }
487 
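/*
 * Verify unregister completion when the descriptor is opened and closed on thread 1
 * while the unregister is issued from thread 0; the unregister only completes once
 * thread 0 is polled.
 */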
488 static void
489 unregister_and_close_different_threads(void)
490 {
491 	bool done;
492 	struct spdk_bdev_desc *desc = NULL;
493 
494 	setup_test();
495 	set_thread(0);
496 
497 	/* setup_test() automatically opens the bdev,
498 	 * but this test needs to do that in a different
499 	 * way. */
500 	spdk_bdev_close(g_desc);
501 	poll_threads();
502 
503 	set_thread(1);
504 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
505 	SPDK_CU_ASSERT_FATAL(desc != NULL);
506 	done = false;
507 
508 	set_thread(0);
509 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
510 
511 	/* Poll the threads to allow all events to be processed */
512 	poll_threads();
513 
514 	/* Make sure the bdev was not unregistered. We still have a
515 	 * descriptor open */
516 	CU_ASSERT(done == false);
517 
518 	/* Close the descriptor on thread 1.  Poll the thread and confirm the
519 	 * unregister did not complete, since it was unregistered on thread 0.
520 	 */
521 	set_thread(1);
522 	spdk_bdev_close(desc);
523 	poll_thread(1);
524 	CU_ASSERT(done == false);
525 
526 	/* Now poll thread 0 and confirm the unregister completed. */
527 	set_thread(0);
528 	poll_thread(0);
529 	CU_ASSERT(done == true);
530 
531 	/* Restore the original g_bdev so that we can use teardown_test(). */
532 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
533 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
534 	teardown_test();
535 }
536 
537 static void
538 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
539 {
540 	bool *done = cb_arg;
541 
542 	CU_ASSERT(success == true);
543 	*done = true;
544 	spdk_bdev_free_io(bdev_io);
545 }
546 
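/*
 * Start a reset and release the I/O channel before the reset's deferred messages
 * run, making sure nothing crashes or leaks during teardown.
 */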
547 static void
548 put_channel_during_reset(void)
549 {
550 	struct spdk_io_channel *io_ch;
551 	bool done = false;
552 
553 	setup_test();
554 
555 	set_thread(0);
556 	io_ch = spdk_bdev_get_io_channel(g_desc);
557 	CU_ASSERT(io_ch != NULL);
558 
559 	/*
560 	 * Start a reset, but then put the I/O channel before
561 	 *  the deferred messages for the reset get a chance to
562 	 *  execute.
563 	 */
564 	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
565 	spdk_put_io_channel(io_ch);
566 	poll_threads();
567 	stub_complete_io(g_bdev.io_target, 0);
568 
569 	teardown_test();
570 }
571 
572 static void
573 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
574 {
575 	enum spdk_bdev_io_status *status = cb_arg;
576 
577 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
578 	spdk_bdev_free_io(bdev_io);
579 }
580 
581 static void io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
582 
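/*
 * Submit a reset on each of two channels.  The second reset is queued behind the
 * first; destroying its channel aborts it (FAILED) while the original reset is
 * still in progress and later completes with SUCCESS.
 */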
583 static void
584 aborted_reset(void)
585 {
586 	struct spdk_io_channel *io_ch[2];
587 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
588 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
589 
590 	setup_test();
591 
592 	set_thread(0);
593 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
594 	CU_ASSERT(io_ch[0] != NULL);
595 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
596 	poll_threads();
597 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
598 
599 	/*
600 	 * First reset has been submitted on ch0.  Now submit a second
601 	 *  reset on ch1 which will get queued since there is already a
602 	 *  reset in progress.
603 	 */
604 	set_thread(1);
605 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
606 	CU_ASSERT(io_ch[1] != NULL);
607 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
608 	poll_threads();
609 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
610 
611 	/*
612 	 * Now destroy ch1.  This will abort the queued reset.  Check that
613 	 *  the second reset was completed with failed status.  Also check
614 	 *  that bdev->internal.reset_in_progress != NULL, since the
615 	 *  original reset has not been completed yet.  This ensures that
616 	 *  the bdev code is correctly noticing that the failed reset is
617 	 *  *not* the one that had been submitted to the bdev module.
618 	 */
619 	set_thread(1);
620 	spdk_put_io_channel(io_ch[1]);
621 	poll_threads();
622 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
623 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
624 
625 	/*
626 	 * Now complete the first reset, verify that it completed with SUCCESS
627 	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
628 	 */
629 	set_thread(0);
630 	spdk_put_io_channel(io_ch[0]);
631 	stub_complete_io(g_bdev.io_target, 0);
632 	poll_threads();
633 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
634 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
635 
636 	teardown_test();
637 }
638 
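/*
 * With no outstanding I/O on either channel and a nonzero reset_io_drain_timeout,
 * both resets complete with SUCCESS as soon as the threads are polled.
 */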
639 static void
640 aborted_reset_no_outstanding_io(void)
641 {
642 	struct spdk_io_channel *io_ch[2];
643 	struct spdk_bdev_channel *bdev_ch[2];
644 	struct spdk_bdev *bdev[2];
645 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
646 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
647 
648 	setup_test();
649 
650 	/*
651 	 * This time we test the reset without any outstanding IO
652 	 * present on the bdev channel, so both resets should finish
653 	 * immediately.
654 	 */
655 
656 	set_thread(0);
657 	/* Set a nonzero reset_io_drain_timeout so the reset takes the drain
658 	 * path; with no outstanding I/O it completes immediately. */
659 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
660 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
661 	bdev[0] = bdev_ch[0]->bdev;
662 	bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
663 	CU_ASSERT(io_ch[0] != NULL);
664 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
665 	poll_threads();
666 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
667 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
668 	spdk_put_io_channel(io_ch[0]);
669 
670 	set_thread(1);
671 	/* Set a nonzero reset_io_drain_timeout so the reset takes the drain
672 	 * path; with no outstanding I/O it completes immediately. */
673 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
674 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
675 	bdev[1] = bdev_ch[1]->bdev;
676 	bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
677 	CU_ASSERT(io_ch[1] != NULL);
678 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
679 	poll_threads();
680 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
681 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
682 	spdk_put_io_channel(io_ch[1]);
683 
684 	stub_complete_io(g_bdev.io_target, 0);
685 	poll_threads();
686 
687 	teardown_test();
688 }
689 
690 
691 static void
692 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
693 {
694 	enum spdk_bdev_io_status *status = cb_arg;
695 
696 	*status = bdev_io->internal.status;
697 	spdk_bdev_free_io(bdev_io);
698 }
699 
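/*
 * Verify that I/O submitted while a reset is in progress is failed with ABORTED by
 * the bdev layer, and that the reset itself only completes after both threads have
 * been polled.
 */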
700 static void
701 io_during_reset(void)
702 {
703 	struct spdk_io_channel *io_ch[2];
704 	struct spdk_bdev_channel *bdev_ch[2];
705 	enum spdk_bdev_io_status status0, status1, status_reset;
706 	int rc;
707 
708 	setup_test();
709 
710 	/*
711 	 * First test normal case - submit an I/O on each of two channels (with no resets)
712 	 *  and verify they complete successfully.
713 	 */
714 	set_thread(0);
715 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
716 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
717 	CU_ASSERT(bdev_ch[0]->flags == 0);
718 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
719 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
720 	CU_ASSERT(rc == 0);
721 
722 	set_thread(1);
723 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
724 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
725 	CU_ASSERT(bdev_ch[1]->flags == 0);
726 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
727 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
728 	CU_ASSERT(rc == 0);
729 
730 	poll_threads();
731 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
732 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
733 
734 	set_thread(0);
735 	stub_complete_io(g_bdev.io_target, 0);
736 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
737 
738 	set_thread(1);
739 	stub_complete_io(g_bdev.io_target, 0);
740 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
741 
742 	/*
743 	 * Now submit a reset, and leave it pending while we submit I/O on two different
744 	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
745 	 *  progress.
746 	 */
747 	set_thread(0);
748 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
749 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
750 	CU_ASSERT(rc == 0);
751 
752 	CU_ASSERT(bdev_ch[0]->flags == 0);
753 	CU_ASSERT(bdev_ch[1]->flags == 0);
754 	poll_threads();
755 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
756 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
757 
758 	set_thread(0);
759 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
760 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
761 	CU_ASSERT(rc == 0);
762 
763 	set_thread(1);
764 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
765 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
766 	CU_ASSERT(rc == 0);
767 
768 	/*
769 	 * A reset is in progress so these read I/O should complete with aborted.  Note that we
770 	 *  need to poll_threads() since I/O completed inline have their completion deferred.
771 	 */
772 	poll_threads();
773 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
774 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
775 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
776 
777 	/*
778 	 * Complete the reset
779 	 */
780 	set_thread(0);
781 	stub_complete_io(g_bdev.io_target, 0);
782 
783 	/*
784 	 * Only poll thread 0. We should not get a completion.
785 	 */
786 	poll_thread(0);
787 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
788 
789 	/*
790 	 * Poll both thread 0 and 1 so the messages can propagate and we
791 	 * get a completion.
792 	 */
793 	poll_threads();
794 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
795 
796 	spdk_put_io_channel(io_ch[0]);
797 	set_thread(1);
798 	spdk_put_io_channel(io_ch[1]);
799 	poll_threads();
800 
801 	teardown_test();
802 }
803 
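/* Count the reset I/O currently queued in the stub module for the given io_target. */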
804 static uint32_t
805 count_queued_resets(void *io_target)
806 {
807 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
808 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
809 	struct spdk_bdev_io *io;
810 	uint32_t submitted_resets = 0;
811 
812 	TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
813 		if (io->type == SPDK_BDEV_IO_TYPE_RESET) {
814 			submitted_resets++;
815 		}
816 	}
817 
818 	spdk_put_io_channel(_ch);
819 
820 	return submitted_resets;
821 }
822 
823 static void
824 reset_completions(void)
825 {
826 	struct spdk_io_channel *io_ch;
827 	struct spdk_bdev_channel *bdev_ch;
828 	struct spdk_bdev *bdev;
829 	enum spdk_bdev_io_status status0, status_reset;
830 	int rc, iter;
831 
832 	setup_test();
833 
834 	/* This test covers four test cases:
835 	 * 1) reset_io_drain_timeout of a bdev is greater than 0
836 	 * 2) No outstanding IO are present on any bdev channel
837 	 * 3) Outstanding IO finish during bdev reset
838 	 * 4) Outstanding IO do not finish before reset is done waiting
839 	 *    for them.
840 	 *
841 	 * Above conditions mainly affect the timing of bdev reset completion
842 	 * and whether a reset should be skipped via spdk_bdev_io_complete()
843 	 * or sent down to the underlying bdev module via bdev_io_submit_reset(). */
844 
845 	/* Test preparation */
846 	set_thread(0);
847 	io_ch = spdk_bdev_get_io_channel(g_desc);
848 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
849 	CU_ASSERT(bdev_ch->flags == 0);
850 
851 
852 	/* Test case 1) reset_io_drain_timeout set to 0. Reset should be sent down immediately. */
853 	bdev = &g_bdev.bdev;
854 	bdev->reset_io_drain_timeout = 0;
855 
856 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
857 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
858 	CU_ASSERT(rc == 0);
859 	poll_threads();
860 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
861 
862 	/* Call reset completion inside bdev module. */
863 	stub_complete_io(g_bdev.io_target, 0);
864 	poll_threads();
865 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
866 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
867 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
868 
869 
870 	/* Test case 2) no outstanding IO are present. Reset should perform one iteration over
871 	 * channels and then be skipped. */
872 	bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
873 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
874 
875 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
876 	CU_ASSERT(rc == 0);
877 	poll_threads();
878 	/* Reset was never submitted to the bdev module. */
879 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
880 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
881 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
882 
883 
884 	/* Test case 3) outstanding IO finish during bdev reset procedure. Reset should initiate
885 	 * a wait poller to check for IO completions every second, until reset_io_drain_timeout is
886 	 * reached, but finish earlier than this threshold. */
887 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
888 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
889 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
890 	CU_ASSERT(rc == 0);
891 
892 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
893 	CU_ASSERT(rc == 0);
894 	poll_threads();
895 	/* The reset just started and should not have been submitted yet. */
896 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
897 
898 	poll_threads();
899 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
900 	/* Let the poller wait for about half the time then complete outstanding IO. */
901 	for (iter = 0; iter < 2; iter++) {
902 		/* Reset is still processing and not submitted at this point. */
903 		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
904 		spdk_delay_us(1000 * 1000);
905 		poll_threads();
906 		poll_threads();
907 	}
908 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
909 	stub_complete_io(g_bdev.io_target, 0);
910 	poll_threads();
911 	spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
912 	poll_threads();
913 	poll_threads();
914 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
915 	/* Sending reset to the bdev module has been skipped. */
916 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
917 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
918 
919 
920 	/* Test case 4) outstanding IO are still present after reset_io_drain_timeout
921 	 * seconds have passed. */
922 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
923 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
924 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
925 	CU_ASSERT(rc == 0);
926 
927 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
928 	CU_ASSERT(rc == 0);
929 	poll_threads();
930 	/* The reset just started and should not have been submitted yet. */
931 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
932 
933 	poll_threads();
934 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
935 	/* Let the poller wait for reset_io_drain_timeout seconds. */
936 	for (iter = 0; iter < bdev->reset_io_drain_timeout; iter++) {
937 		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
938 		spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
939 		poll_threads();
940 		poll_threads();
941 	}
942 
943 	/* After timing out, the reset should have been sent to the module. */
944 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
945 	/* Complete reset submitted to the module and the read IO. */
946 	stub_complete_io(g_bdev.io_target, 0);
947 	poll_threads();
948 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
949 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
950 
951 
952 	/* Destroy the channel and end the test. */
953 	spdk_put_io_channel(io_ch);
954 	poll_threads();
955 
956 	teardown_test();
957 }
958 
959 
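/*
 * Enable QoS rate limits directly on the bdev and verify I/O and abort handling on
 * both the QoS thread (thread 0) and a non-QoS thread, plus teardown and re-creation
 * of the QoS channel as descriptors and channels come and go.
 */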
960 static void
961 basic_qos(void)
962 {
963 	struct spdk_io_channel *io_ch[2];
964 	struct spdk_bdev_channel *bdev_ch[2];
965 	struct spdk_bdev *bdev;
966 	enum spdk_bdev_io_status status, abort_status;
967 	int rc;
968 
969 	setup_test();
970 
971 	/* Enable QoS */
972 	bdev = &g_bdev.bdev;
973 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
974 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
975 	TAILQ_INIT(&bdev->internal.qos->queued);
976 	/*
977 	 * Enable read/write IOPS, read only byte per second and
978 	 * read/write byte per second rate limits.
979 	 * In this case, all rate limits will take equal effect.
980 	 */
981 	/* 2000 read/write I/O per second, or 2 per millisecond */
982 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
983 	/* 8K read/write byte per millisecond with 4K block size */
984 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
985 	/* 8K read only byte per millisecond with 4K block size */
986 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
987 
988 	g_get_io_channel = true;
989 
990 	set_thread(0);
991 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
992 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
993 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
994 
995 	set_thread(1);
996 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
997 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
998 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
999 
1000 	/*
1001 	 * Send an I/O on thread 0, which is where the QoS thread is running.
1002 	 */
1003 	set_thread(0);
1004 	status = SPDK_BDEV_IO_STATUS_PENDING;
1005 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1006 	CU_ASSERT(rc == 0);
1007 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1008 	poll_threads();
1009 	stub_complete_io(g_bdev.io_target, 0);
1010 	poll_threads();
1011 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1012 
1013 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1014 	status = SPDK_BDEV_IO_STATUS_PENDING;
1015 	set_thread(1);
1016 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1017 	CU_ASSERT(rc == 0);
1018 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1019 	poll_threads();
1020 	/* Complete I/O on thread 0. This should not complete the I/O we submitted. */
1021 	set_thread(0);
1022 	stub_complete_io(g_bdev.io_target, 0);
1023 	poll_threads();
1024 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1025 	/* Now complete I/O on original thread 1. */
1026 	set_thread(1);
1027 	poll_threads();
1028 	stub_complete_io(g_bdev.io_target, 0);
1029 	poll_threads();
1030 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1031 
1032 	/* Reset rate limit for the next test cases. */
1033 	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
1034 	poll_threads();
1035 
1036 	/*
1037 	 * Test abort request when QoS is enabled.
1038 	 */
1039 
1040 	/* Send an I/O on thread 0, which is where the QoS thread is running. */
1041 	set_thread(0);
1042 	status = SPDK_BDEV_IO_STATUS_PENDING;
1043 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1044 	CU_ASSERT(rc == 0);
1045 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1046 	/* Send an abort to the I/O on the same thread. */
1047 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1048 	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
1049 	CU_ASSERT(rc == 0);
1050 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1051 	poll_threads();
1052 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1053 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1054 
1055 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1056 	status = SPDK_BDEV_IO_STATUS_PENDING;
1057 	set_thread(1);
1058 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1059 	CU_ASSERT(rc == 0);
1060 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1061 	poll_threads();
1062 	/* Send an abort to the I/O on the same thread. */
1063 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1064 	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
1065 	CU_ASSERT(rc == 0);
1066 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1067 	poll_threads();
1068 	/* Complete the I/O with failure and the abort with success on thread 1. */
1069 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1070 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1071 
1072 	set_thread(0);
1073 
1074 	/*
1075 	 * Close the descriptor only, which should stop the qos channel since
1076 	 * this is the last descriptor being removed.
1077 	 */
1078 	spdk_bdev_close(g_desc);
1079 	poll_threads();
1080 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1081 
1082 	/*
1083 	 * Open the bdev again, which should set up the qos channel again since
1084 	 * the I/O channels are still valid.
1085 	 */
1086 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1087 	poll_threads();
1088 	CU_ASSERT(bdev->internal.qos->ch != NULL);
1089 
1090 	/* Tear down the channels */
1091 	set_thread(0);
1092 	spdk_put_io_channel(io_ch[0]);
1093 	set_thread(1);
1094 	spdk_put_io_channel(io_ch[1]);
1095 	poll_threads();
1096 	set_thread(0);
1097 
1098 	/* Close the descriptor, which should stop the qos channel */
1099 	spdk_bdev_close(g_desc);
1100 	poll_threads();
1101 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1102 
1103 	/* Open the bdev again, no qos channel setup without valid channels. */
1104 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1105 	poll_threads();
1106 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1107 
1108 	/* Create the channels in reverse order. */
1109 	set_thread(1);
1110 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1111 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1112 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1113 
1114 	set_thread(0);
1115 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1116 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1117 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1118 
1119 	/* Confirm that the qos thread is now thread 1 */
1120 	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
1121 
1122 	/* Tear down the channels */
1123 	set_thread(0);
1124 	spdk_put_io_channel(io_ch[0]);
1125 	set_thread(1);
1126 	spdk_put_io_channel(io_ch[1]);
1127 	poll_threads();
1128 
1129 	set_thread(0);
1130 
1131 	teardown_test();
1132 }
1133 
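/*
 * With IOPS and byte-per-second limits enabled, submit two reads and one write in
 * the same timeslice and verify that only part of the I/O completes until the clock
 * advances into the next timeslice.
 */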
1134 static void
1135 io_during_qos_queue(void)
1136 {
1137 	struct spdk_io_channel *io_ch[2];
1138 	struct spdk_bdev_channel *bdev_ch[2];
1139 	struct spdk_bdev *bdev;
1140 	enum spdk_bdev_io_status status0, status1, status2;
1141 	int rc;
1142 
1143 	setup_test();
1144 	MOCK_SET(spdk_get_ticks, 0);
1145 
1146 	/* Enable QoS */
1147 	bdev = &g_bdev.bdev;
1148 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1149 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1150 	TAILQ_INIT(&bdev->internal.qos->queued);
1151 	/*
1152 	 * Enable read/write IOPS, read only byte per sec, write only
1153 	 * byte per sec and read/write byte per sec rate limits.
1154 	 * In this case, both read only and write only byte per sec
1155 	 * rate limit will take effect.
1156 	 */
1157 	/* 4000 read/write I/O per second, or 4 per millisecond */
1158 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
1159 	/* 8K byte per millisecond with 4K block size */
1160 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
1161 	/* 4K byte per millisecond with 4K block size */
1162 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
1163 	/* 4K byte per millisecond with 4K block size */
1164 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
1165 
1166 	g_get_io_channel = true;
1167 
1168 	/* Create channels */
1169 	set_thread(0);
1170 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1171 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1172 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1173 
1174 	set_thread(1);
1175 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1176 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1177 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1178 
1179 	/* Send two read I/Os */
1180 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1181 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1182 	CU_ASSERT(rc == 0);
1183 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1184 	set_thread(0);
1185 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1186 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1187 	CU_ASSERT(rc == 0);
1188 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1189 	/* Send one write I/O */
1190 	status2 = SPDK_BDEV_IO_STATUS_PENDING;
1191 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
1192 	CU_ASSERT(rc == 0);
1193 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
1194 
1195 	/* Complete any I/O that arrived at the disk */
1196 	poll_threads();
1197 	set_thread(1);
1198 	stub_complete_io(g_bdev.io_target, 0);
1199 	set_thread(0);
1200 	stub_complete_io(g_bdev.io_target, 0);
1201 	poll_threads();
1202 
1203 	/* Only one of the two read I/Os should complete. (logical XOR) */
1204 	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
1205 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1206 	} else {
1207 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1208 	}
1209 	/* The write I/O should complete. */
1210 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
1211 
1212 	/* Advance in time by a millisecond */
1213 	spdk_delay_us(1000);
1214 
1215 	/* Complete more I/O */
1216 	poll_threads();
1217 	set_thread(1);
1218 	stub_complete_io(g_bdev.io_target, 0);
1219 	set_thread(0);
1220 	stub_complete_io(g_bdev.io_target, 0);
1221 	poll_threads();
1222 
1223 	/* Now the second read I/O should be done */
1224 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
1225 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1226 
1227 	/* Tear down the channels */
1228 	set_thread(1);
1229 	spdk_put_io_channel(io_ch[1]);
1230 	set_thread(0);
1231 	spdk_put_io_channel(io_ch[0]);
1232 	poll_threads();
1233 
1234 	teardown_test();
1235 }
1236 
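/*
 * With QoS enabled, queue I/O behind the rate limit and then reset the bdev; the
 * queued and outstanding writes are aborted and the reset completes successfully.
 */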
1237 static void
1238 io_during_qos_reset(void)
1239 {
1240 	struct spdk_io_channel *io_ch[2];
1241 	struct spdk_bdev_channel *bdev_ch[2];
1242 	struct spdk_bdev *bdev;
1243 	enum spdk_bdev_io_status status0, status1, reset_status;
1244 	int rc;
1245 
1246 	setup_test();
1247 	MOCK_SET(spdk_get_ticks, 0);
1248 
1249 	/* Enable QoS */
1250 	bdev = &g_bdev.bdev;
1251 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1252 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1253 	TAILQ_INIT(&bdev->internal.qos->queued);
1254 	/*
1255 	 * Enable read/write IOPS, write only byte per sec and
1256 	 * read/write byte per second rate limits.
1257 	 * In this case, read/write byte per second rate limit will
1258 	 * take effect first.
1259 	 */
1260 	/* 2000 read/write I/O per second, or 2 per millisecond */
1261 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
1262 	/* 4K byte per millisecond with 4K block size */
1263 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
1264 	/* 8K byte per millisecond with 4K block size */
1265 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
1266 
1267 	g_get_io_channel = true;
1268 
1269 	/* Create channels */
1270 	set_thread(0);
1271 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1272 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1273 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1274 
1275 	set_thread(1);
1276 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1277 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1278 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1279 
1280 	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
1281 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1282 	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1283 	CU_ASSERT(rc == 0);
1284 	set_thread(0);
1285 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1286 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1287 	CU_ASSERT(rc == 0);
1288 
1289 	poll_threads();
1290 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1291 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1292 
1293 	/* Reset the bdev. */
1294 	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
1295 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
1296 	CU_ASSERT(rc == 0);
1297 
1298 	/* Complete any I/O that arrived at the disk */
1299 	poll_threads();
1300 	set_thread(1);
1301 	stub_complete_io(g_bdev.io_target, 0);
1302 	set_thread(0);
1303 	stub_complete_io(g_bdev.io_target, 0);
1304 	poll_threads();
1305 
1306 	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1307 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
1308 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
1309 
1310 	/* Tear down the channels */
1311 	set_thread(1);
1312 	spdk_put_io_channel(io_ch[1]);
1313 	set_thread(0);
1314 	spdk_put_io_channel(io_ch[0]);
1315 	poll_threads();
1316 
1317 	teardown_test();
1318 }
1319 
1320 static void
1321 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1322 {
1323 	enum spdk_bdev_io_status *status = cb_arg;
1324 
1325 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
1326 	spdk_bdev_free_io(bdev_io);
1327 }
1328 
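/*
 * Drive the stub module into ENOMEM: saturate avail_cnt, verify that additional I/O
 * is parked on the shared_resource nomem_io list, that retries only start once the
 * nomem_threshold is reached, and that a reset flushes everything.
 */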
1329 static void
1330 enomem(void)
1331 {
1332 	struct spdk_io_channel *io_ch;
1333 	struct spdk_bdev_channel *bdev_ch;
1334 	struct spdk_bdev_shared_resource *shared_resource;
1335 	struct ut_bdev_channel *ut_ch;
1336 	const uint32_t IO_ARRAY_SIZE = 64;
1337 	const uint32_t AVAIL = 20;
1338 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
1339 	uint32_t nomem_cnt, i;
1340 	struct spdk_bdev_io *first_io;
1341 	int rc;
1342 
1343 	setup_test();
1344 
1345 	set_thread(0);
1346 	io_ch = spdk_bdev_get_io_channel(g_desc);
1347 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1348 	shared_resource = bdev_ch->shared_resource;
1349 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1350 	ut_ch->avail_cnt = AVAIL;
1351 
1352 	/* First submit a number of IOs equal to what the channel can support. */
1353 	for (i = 0; i < AVAIL; i++) {
1354 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1355 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1356 		CU_ASSERT(rc == 0);
1357 	}
1358 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1359 
1360 	/*
1361 	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
1362 	 *  the nomem_io list.
1363 	 */
1364 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1365 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1366 	CU_ASSERT(rc == 0);
1367 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1368 	first_io = TAILQ_FIRST(&shared_resource->nomem_io);
1369 
1370 	/*
1371 	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
1372 	 *  the first_io above.
1373 	 */
1374 	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
1375 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1376 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1377 		CU_ASSERT(rc == 0);
1378 	}
1379 
1380 	/* Assert that first_io is still at the head of the list. */
1381 	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
1382 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
1383 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1384 	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
1385 
1386 	/*
1387 	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
1388 	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
1389 	 *  list.
1390 	 */
1391 	stub_complete_io(g_bdev.io_target, 1);
1392 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1393 
1394 	/*
1395 	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
1396 	 *  and we should see I/O get resubmitted to the test bdev module.
1397 	 */
1398 	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
1399 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
1400 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1401 
1402 	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
1403 	stub_complete_io(g_bdev.io_target, 1);
1404 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1405 
1406 	/*
1407 	 * Send a reset and confirm that all I/O are completed, including the ones that
1408 	 *  were queued on the nomem_io list.
1409 	 */
1410 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
1411 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
1412 	poll_threads();
1413 	CU_ASSERT(rc == 0);
1414 	/* This will complete the reset. */
1415 	stub_complete_io(g_bdev.io_target, 0);
1416 
1417 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
1418 	CU_ASSERT(shared_resource->io_outstanding == 0);
1419 
1420 	spdk_put_io_channel(io_ch);
1421 	poll_threads();
1422 	teardown_test();
1423 }
1424 
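/*
 * Two bdevs sharing one io_target share a shared_resource: saturating it through the
 * first bdev makes the second bdev's I/O queue on the common nomem_io list and get
 * retried once the first bdev's I/O completes.
 */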
1425 static void
1426 enomem_multi_bdev(void)
1427 {
1428 	struct spdk_io_channel *io_ch;
1429 	struct spdk_bdev_channel *bdev_ch;
1430 	struct spdk_bdev_shared_resource *shared_resource;
1431 	struct ut_bdev_channel *ut_ch;
1432 	const uint32_t IO_ARRAY_SIZE = 64;
1433 	const uint32_t AVAIL = 20;
1434 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1435 	uint32_t i;
1436 	struct ut_bdev *second_bdev;
1437 	struct spdk_bdev_desc *second_desc = NULL;
1438 	struct spdk_bdev_channel *second_bdev_ch;
1439 	struct spdk_io_channel *second_ch;
1440 	int rc;
1441 
1442 	setup_test();
1443 
1444 	/* Register second bdev with the same io_target  */
1445 	second_bdev = calloc(1, sizeof(*second_bdev));
1446 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1447 	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
1448 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1449 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1450 
1451 	set_thread(0);
1452 	io_ch = spdk_bdev_get_io_channel(g_desc);
1453 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1454 	shared_resource = bdev_ch->shared_resource;
1455 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1456 	ut_ch->avail_cnt = AVAIL;
1457 
1458 	second_ch = spdk_bdev_get_io_channel(second_desc);
1459 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1460 	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
1461 
1462 	/* Saturate io_target through bdev A. */
1463 	for (i = 0; i < AVAIL; i++) {
1464 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1465 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1466 		CU_ASSERT(rc == 0);
1467 	}
1468 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1469 
1470 	/*
1471 	 * Now submit I/O through the second bdev. This should fail with ENOMEM
1472 	 * and then go onto the nomem_io list.
1473 	 */
1474 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1475 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1476 	CU_ASSERT(rc == 0);
1477 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1478 
1479 	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
1480 	stub_complete_io(g_bdev.io_target, AVAIL);
1481 
1482 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1483 	CU_ASSERT(shared_resource->io_outstanding == 1);
1484 
1485 	/* Now complete our retried I/O  */
1486 	stub_complete_io(g_bdev.io_target, 1);
1487 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1488 
1489 	spdk_put_io_channel(io_ch);
1490 	spdk_put_io_channel(second_ch);
1491 	spdk_bdev_close(second_desc);
1492 	unregister_bdev(second_bdev);
1493 	poll_threads();
1494 	free(second_bdev);
1495 	teardown_test();
1496 }
1497 
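/*
 * Unregistering a bdev while it has I/O parked on the nomem_io list must abort that
 * queued I/O (FAILED) while leaving the already-submitted I/O outstanding.
 */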
1498 static void
1499 enomem_multi_bdev_unregister(void)
1500 {
1501 	struct spdk_io_channel *io_ch;
1502 	struct spdk_bdev_channel *bdev_ch;
1503 	struct spdk_bdev_shared_resource *shared_resource;
1504 	struct ut_bdev_channel *ut_ch;
1505 	const uint32_t IO_ARRAY_SIZE = 64;
1506 	const uint32_t AVAIL = 20;
1507 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1508 	uint32_t i;
1509 	int rc;
1510 
1511 	setup_test();
1512 
1513 	set_thread(0);
1514 	io_ch = spdk_bdev_get_io_channel(g_desc);
1515 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1516 	shared_resource = bdev_ch->shared_resource;
1517 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1518 	ut_ch->avail_cnt = AVAIL;
1519 
1520 	/* Saturate io_target through the bdev. */
1521 	for (i = 0; i < AVAIL; i++) {
1522 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1523 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1524 		CU_ASSERT(rc == 0);
1525 	}
1526 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1527 
1528 	/*
1529 	 * Now submit I/O through the bdev. This should fail with ENOMEM
1530 	 * and then go onto the nomem_io list.
1531 	 */
1532 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1533 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1534 	CU_ASSERT(rc == 0);
1535 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1536 
1537 	/* Unregister the bdev to abort the IOs from nomem_io queue. */
1538 	unregister_bdev(&g_bdev);
1539 	CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED);
1540 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1541 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL);
1542 
1543 	/* Complete the bdev's I/O. */
1544 	stub_complete_io(g_bdev.io_target, AVAIL);
1545 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1546 
1547 	spdk_put_io_channel(io_ch);
1548 	poll_threads();
1549 	teardown_test();
1550 }
1551 
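/*
 * Bdevs on different io_targets get separate shared_resources, so ENOMEM on one bdev
 * does not block I/O submitted to a bdev backed by another io_target.
 */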
1552 static void
1553 enomem_multi_io_target(void)
1554 {
1555 	struct spdk_io_channel *io_ch;
1556 	struct spdk_bdev_channel *bdev_ch;
1557 	struct ut_bdev_channel *ut_ch;
1558 	const uint32_t IO_ARRAY_SIZE = 64;
1559 	const uint32_t AVAIL = 20;
1560 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1561 	uint32_t i;
1562 	int new_io_device;
1563 	struct ut_bdev *second_bdev;
1564 	struct spdk_bdev_desc *second_desc = NULL;
1565 	struct spdk_bdev_channel *second_bdev_ch;
1566 	struct spdk_io_channel *second_ch;
1567 	int rc;
1568 
1569 	setup_test();
1570 
1571 	/* Create new io_target and a second bdev using it */
1572 	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1573 				sizeof(struct ut_bdev_channel), NULL);
1574 	second_bdev = calloc(1, sizeof(*second_bdev));
1575 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1576 	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
1577 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1578 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1579 
1580 	set_thread(0);
1581 	io_ch = spdk_bdev_get_io_channel(g_desc);
1582 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1583 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1584 	ut_ch->avail_cnt = AVAIL;
1585 
1586 	/* Different io_target should imply a different shared_resource */
1587 	second_ch = spdk_bdev_get_io_channel(second_desc);
1588 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1589 	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1590 
1591 	/* Saturate io_target through bdev A. */
1592 	for (i = 0; i < AVAIL; i++) {
1593 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1594 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1595 		CU_ASSERT(rc == 0);
1596 	}
1597 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1598 
1599 	/* Issue one more I/O to fill ENOMEM list. */
1600 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1601 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1602 	CU_ASSERT(rc == 0);
1603 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1604 
1605 	/*
1606 	 * Now submit I/O through the second bdev. This should go through and complete
1607 	 * successfully because we're using a different io_device underneath.
1608 	 */
1609 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1610 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1611 	CU_ASSERT(rc == 0);
1612 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1613 	stub_complete_io(second_bdev->io_target, 1);
1614 
1615 	/* Cleanup; Complete outstanding I/O. */
1616 	stub_complete_io(g_bdev.io_target, AVAIL);
1617 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1618 	/* Complete the ENOMEM I/O */
1619 	stub_complete_io(g_bdev.io_target, 1);
1620 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1621 
1622 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1623 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1624 	spdk_put_io_channel(io_ch);
1625 	spdk_put_io_channel(second_ch);
1626 	spdk_bdev_close(second_desc);
1627 	unregister_bdev(second_bdev);
1628 	spdk_io_device_unregister(&new_io_device, NULL);
1629 	poll_threads();
1630 	free(second_bdev);
1631 	teardown_test();
1632 }
1633 
1634 static void
1635 qos_dynamic_enable_done(void *cb_arg, int status)
1636 {
1637 	int *rc = cb_arg;
1638 	*rc = status;
1639 }
1640 
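/*
 * Enable and later disable QoS rate limits at runtime via
 * spdk_bdev_set_qos_rate_limits() and verify that I/O queued by QoS is neither
 * aborted nor lost across the transitions.
 */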
1641 static void
1642 qos_dynamic_enable(void)
1643 {
1644 	struct spdk_io_channel *io_ch[2];
1645 	struct spdk_bdev_channel *bdev_ch[2];
1646 	struct spdk_bdev *bdev;
1647 	enum spdk_bdev_io_status bdev_io_status[2];
1648 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
1649 	int status, second_status, rc, i;
1650 
1651 	setup_test();
1652 	MOCK_SET(spdk_get_ticks, 0);
1653 
1654 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1655 		limits[i] = UINT64_MAX;
1656 	}
1657 
1658 	bdev = &g_bdev.bdev;
1659 
1660 	g_get_io_channel = true;
1661 
1662 	/* Create channels */
1663 	set_thread(0);
1664 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1665 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1666 	CU_ASSERT(bdev_ch[0]->flags == 0);
1667 
1668 	set_thread(1);
1669 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1670 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1671 	CU_ASSERT(bdev_ch[1]->flags == 0);
1672 
1673 	set_thread(0);
1674 
1675 	/*
1676 	 * Enable QoS: Read/Write IOPS, Read/Write byte,
1677 	 * Read only byte and Write only byte per second
1678 	 * rate limits.
1679 	 * More than 10 I/Os allowed per timeslice.
1680 	 */
1681 	status = -1;
1682 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1683 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
1684 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
1685 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
1686 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1687 	poll_threads();
1688 	CU_ASSERT(status == 0);
1689 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1690 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1691 
1692 	/*
1693 	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
1694 	 * Additional I/O will then be queued.
1695 	 */
1696 	set_thread(0);
1697 	for (i = 0; i < 10; i++) {
1698 		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1699 		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1700 		CU_ASSERT(rc == 0);
1701 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1702 		poll_thread(0);
1703 		stub_complete_io(g_bdev.io_target, 0);
1704 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1705 	}
1706 
1707 	/*
1708 	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
1709 	 * filled already.  We want to test that, when QoS is disabled, these two I/O:
1710 	 *  1) are not aborted
1711 	 *  2) are sent back to their original thread for resubmission
1712 	 */
1713 	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1714 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1715 	CU_ASSERT(rc == 0);
1716 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1717 	set_thread(1);
1718 	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
1719 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
1720 	CU_ASSERT(rc == 0);
1721 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1722 	poll_threads();
1723 
1724 	/*
1725 	 * Disable QoS: Read/Write IOPS, Read/Write byte,
1726 	 * Read only byte rate limits
1727 	 */
1728 	status = -1;
1729 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1730 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
1731 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
1732 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1733 	poll_threads();
1734 	CU_ASSERT(status == 0);
1735 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1736 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1737 
1738 	/* Disable QoS: Write only Byte per second rate limit */
1739 	status = -1;
1740 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
1741 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1742 	poll_threads();
1743 	CU_ASSERT(status == 0);
1744 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1745 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1746 
1747 	/*
1748 	 * All I/O should have been resubmitted back on their original thread.  Complete
1749 	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
1750 	 */
1751 	set_thread(0);
1752 	stub_complete_io(g_bdev.io_target, 0);
1753 	poll_threads();
1754 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1755 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1756 
1757 	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
1758 	set_thread(1);
1759 	stub_complete_io(g_bdev.io_target, 0);
1760 	poll_threads();
1761 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
1762 
1763 	/* Disable QoS again */
1764 	status = -1;
1765 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1766 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1767 	poll_threads();
1768 	CU_ASSERT(status == 0); /* This should succeed */
1769 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1770 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1771 
1772 	/* Enable QoS on thread 0 */
1773 	status = -1;
1774 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1775 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1776 	poll_threads();
1777 	CU_ASSERT(status == 0);
1778 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1779 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1780 
1781 	/* Disable QoS on thread 1 */
1782 	set_thread(1);
1783 	status = -1;
1784 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1785 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1786 	/* Don't poll yet. This should leave the channels with QoS enabled */
1787 	CU_ASSERT(status == -1);
1788 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1789 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1790 
1791 	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
1792 	second_status = 0;
1793 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
1794 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
1795 	poll_threads();
1796 	CU_ASSERT(status == 0); /* The disable should succeed */
1797 	CU_ASSERT(second_status < 0); /* The enable should fail */
1798 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1799 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1800 
1801 	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
1802 	status = -1;
1803 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1804 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1805 	poll_threads();
1806 	CU_ASSERT(status == 0);
1807 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1808 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1809 
1810 	/* Tear down the channels */
1811 	set_thread(0);
1812 	spdk_put_io_channel(io_ch[0]);
1813 	set_thread(1);
1814 	spdk_put_io_channel(io_ch[1]);
1815 	poll_threads();
1816 
1817 	set_thread(0);
1818 	teardown_test();
1819 }
1820 
1821 static void
1822 histogram_status_cb(void *cb_arg, int status)
1823 {
1824 	g_status = status;
1825 }
1826 
1827 static void
1828 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1829 {
1830 	g_status = status;
1831 	g_histogram = histogram;
1832 }
1833 
1834 static void
1835 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1836 		   uint64_t total, uint64_t so_far)
1837 {
1838 	g_count += count;
1839 }
1840 
1841 static void
1842 bdev_histograms_mt(void)
1843 {
1844 	struct spdk_io_channel *ch[2];
1845 	struct spdk_histogram_data *histogram;
1846 	uint8_t buf[4096];
1847 	enum spdk_bdev_io_status status = SPDK_BDEV_IO_STATUS_PENDING;
1848 	int rc;
1849 
1850 
1851 	setup_test();
1852 
1853 	set_thread(0);
1854 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1855 	CU_ASSERT(ch[0] != NULL);
1856 
1857 	set_thread(1);
1858 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1859 	CU_ASSERT(ch[1] != NULL);
1860 
1861 
1862 	/* Enable histogram */
1863 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
1864 	poll_threads();
1865 	CU_ASSERT(g_status == 0);
1866 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1867 
1868 	/* Allocate histogram */
1869 	histogram = spdk_histogram_data_alloc();
1870 
1871 	/* Check if histogram is zeroed */
1872 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1873 	poll_threads();
1874 	CU_ASSERT(g_status == 0);
1875 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1876 
1877 	g_count = 0;
1878 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1879 
1880 	CU_ASSERT(g_count == 0);
1881 
1882 	set_thread(0);
1883 	rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
1884 	CU_ASSERT(rc == 0);
1885 
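	/* Add a small delay before completion so the I/O records a non-zero latency in the histogram. */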
1886 	spdk_delay_us(10);
1887 	stub_complete_io(g_bdev.io_target, 1);
1888 	poll_threads();
1889 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1890 
1891 
1892 	set_thread(1);
1893 	rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
1894 	CU_ASSERT(rc == 0);
1895 
1896 	spdk_delay_us(10);
1897 	stub_complete_io(g_bdev.io_target, 1);
1898 	poll_threads();
1899 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1900 
1901 	set_thread(0);
1902 
1903 	/* Check if histogram gathered data from all I/O channels */
1904 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1905 	poll_threads();
1906 	CU_ASSERT(g_status == 0);
1907 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1908 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1909 
1910 	g_count = 0;
1911 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1912 	CU_ASSERT(g_count == 2);
1913 
1914 	/* Disable histogram */
1915 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1916 	poll_threads();
1917 	CU_ASSERT(g_status == 0);
1918 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1919 
1920 	spdk_histogram_data_free(histogram);
1921 
1922 	/* Tear down the channels */
1923 	set_thread(0);
1924 	spdk_put_io_channel(ch[0]);
1925 	set_thread(1);
1926 	spdk_put_io_channel(ch[1]);
1927 	poll_threads();
1928 	set_thread(0);
1929 	teardown_test();
1931 }
1932 
1933 struct timeout_io_cb_arg {
1934 	struct iovec iov;
1935 	uint8_t type;
1936 };
1937 
1938 static int
1939 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
1940 {
1941 	struct spdk_bdev_io *bdev_io;
1942 	int n = 0;
1943 
1944 	if (!ch) {
1945 		return -1;
1946 	}
1947 
1948 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
1949 		n++;
1950 	}
1951 
1952 	return n;
1953 }
1954 
1955 static void
1956 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
1957 {
1958 	struct timeout_io_cb_arg *ctx = cb_arg;
1959 
1960 	ctx->type = bdev_io->type;
1961 	ctx->iov.iov_base = bdev_io->iov.iov_base;
1962 	ctx->iov.iov_len = bdev_io->iov.iov_len;
1963 }
1964 
1965 static bool g_io_done;
1966 
1967 static void
1968 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1969 {
1970 	g_io_done = true;
1971 	spdk_bdev_free_io(bdev_io);
1972 }
1973 
1974 static void
1975 bdev_set_io_timeout_mt(void)
1976 {
1977 	struct spdk_io_channel *ch[3];
1978 	struct spdk_bdev_channel *bdev_ch[3];
1979 	struct timeout_io_cb_arg cb_arg;
1980 
1981 	setup_test();
1982 
1983 	g_bdev.bdev.optimal_io_boundary = 16;
1984 	g_bdev.bdev.split_on_optimal_io_boundary = true;
1985 
1986 	set_thread(0);
1987 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1988 	CU_ASSERT(ch[0] != NULL);
1989 
1990 	set_thread(1);
1991 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1992 	CU_ASSERT(ch[1] != NULL);
1993 
1994 	set_thread(2);
1995 	ch[2] = spdk_bdev_get_io_channel(g_desc);
1996 	CU_ASSERT(ch[2] != NULL);
1997 
1998 	/* Multi-thread mode
1999 	 * 1. Check that the timeout poller was registered successfully.
2000 	 * 2. Check the timed-out I/O and ensure it is the I/O submitted by the user.
2001 	 * 3. Check that the I/O link in the bdev_ch works correctly.
2002 	 * 4. Close the desc and put the io channel while the timeout poller is polling.
2003 	 */
2004 
2005 	/* In desc thread set the timeout */
2006 	set_thread(0);
2007 	CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2008 	CU_ASSERT(g_desc->io_timeout_poller != NULL);
2009 	CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
2010 	CU_ASSERT(g_desc->cb_arg == &cb_arg);
2011 
2012 	/* check the IO submitted list and timeout handler */
2013 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
2014 	bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
2015 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
2016 
2017 	set_thread(1);
2018 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2019 	bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
2020 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
2021 
2022 	/* Now test that a single-vector command is split correctly.
2023 	 * Offset 14, length 8, payload 0xF000
2024 	 *  Child - Offset 14, length 2, payload 0xF000
2025 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
2026 	 *
2027 	 * Set up the expected values before calling spdk_bdev_read_blocks
2028 	 */
2029 	set_thread(2);
2030 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
2031 	bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
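	/* The parent I/O and its two split children are all tracked on the channel's io_submitted list. */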
2032 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
2033 
2034 	set_thread(0);
2035 	memset(&cb_arg, 0, sizeof(cb_arg));
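	/* Advance time, but stay below the 5 second timeout; nothing should be reported as timed out yet. */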
2036 	spdk_delay_us(3 * spdk_get_ticks_hz());
2037 	poll_threads();
2038 	CU_ASSERT(cb_arg.type == 0);
2039 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2040 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2041 
2042 	/* Now the elapsed time reaches the timeout limit */
2043 	spdk_delay_us(3 * spdk_get_ticks_hz());
2044 	poll_thread(0);
2045 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2046 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2047 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2048 	stub_complete_io(g_bdev.io_target, 1);
2049 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
2050 
2051 	memset(&cb_arg, 0, sizeof(cb_arg));
2052 	set_thread(1);
2053 	poll_thread(1);
2054 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2055 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
2056 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2057 	stub_complete_io(g_bdev.io_target, 1);
2058 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
2059 
2060 	memset(&cb_arg, 0, sizeof(cb_arg));
2061 	set_thread(2);
2062 	poll_thread(2);
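	/* The timeout callback reports the parent I/O (offset 14, 8 blocks), not its split children. */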
2063 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2064 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
2065 	CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
2066 	stub_complete_io(g_bdev.io_target, 1);
2067 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
2068 	stub_complete_io(g_bdev.io_target, 1);
2069 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
2070 
2071 	/* Run poll_timeout_done(), which completes the timeout poller iteration */
2072 	set_thread(0);
2073 	poll_thread(0);
2074 	CU_ASSERT(g_desc->refs == 0);
2075 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2076 	set_thread(1);
2077 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
2078 	set_thread(2);
2079 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
2080 
2081 	/* Trigger the timeout poller to run again; desc->refs is incremented.
2082 	 * On thread 0 we destroy the io channel before the timeout poller runs,
2083 	 * so the timeout callback is not called on thread 0.
2084 	 */
2085 	spdk_delay_us(6 * spdk_get_ticks_hz());
2086 	memset(&cb_arg, 0, sizeof(cb_arg));
2087 	set_thread(0);
2088 	stub_complete_io(g_bdev.io_target, 1);
2089 	spdk_put_io_channel(ch[0]);
2090 	poll_thread(0);
2091 	CU_ASSERT(g_desc->refs == 1);
2092 	CU_ASSERT(cb_arg.type == 0);
2093 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2094 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2095 
2096 	/* On thread 1 the timeout poller runs first, then we destroy the io channel,
2097 	 * so the timeout callback is called on thread 1.
2098 	 */
2099 	memset(&cb_arg, 0, sizeof(cb_arg));
2100 	set_thread(1);
2101 	poll_thread(1);
2102 	stub_complete_io(g_bdev.io_target, 1);
2103 	spdk_put_io_channel(ch[1]);
2104 	poll_thread(1);
2105 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2106 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2107 	CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
2108 
2109 	/* Close the desc.
2110 	 * This unregisters the timeout poller first and then decrements desc->refs,
2111 	 * but refs is not zero yet, so the desc is not freed.
2112 	 */
2113 	set_thread(0);
2114 	spdk_bdev_close(g_desc);
2115 	CU_ASSERT(g_desc->refs == 1);
2116 	CU_ASSERT(g_desc->io_timeout_poller == NULL);
2117 
2118 	/* The timeout poller runs on thread 2, then we destroy the io channel.
2119 	 * The desc is already closed, so the timeout poller exits immediately and the
2120 	 * timeout callback is not called on thread 2.
2121 	 */
2122 	memset(&cb_arg, 0, sizeof(cb_arg));
2123 	set_thread(2);
2124 	poll_thread(2);
2125 	stub_complete_io(g_bdev.io_target, 1);
2126 	spdk_put_io_channel(ch[2]);
2127 	poll_thread(2);
2128 	CU_ASSERT(cb_arg.type == 0);
2129 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2130 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2131 
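	/* g_desc was already closed above, so tear down manually instead of calling teardown_test(). */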
2132 	set_thread(0);
2133 	poll_thread(0);
2134 	g_teardown_done = false;
2135 	unregister_bdev(&g_bdev);
2136 	spdk_io_device_unregister(&g_io_device, NULL);
2137 	spdk_bdev_finish(finish_cb, NULL);
2138 	spdk_iobuf_finish(finish_cb, NULL);
2139 	poll_threads();
2140 	memset(&g_bdev, 0, sizeof(g_bdev));
2141 	CU_ASSERT(g_teardown_done == true);
2142 	g_teardown_done = false;
2143 	free_threads();
2144 	free_cores();
2145 }
2146 
2147 static bool g_io_done2;
2148 static bool g_lock_lba_range_done;
2149 static bool g_unlock_lba_range_done;
2150 
2151 static void
2152 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2153 {
2154 	g_io_done2 = true;
2155 	spdk_bdev_free_io(bdev_io);
2156 }
2157 
2158 static void
2159 lock_lba_range_done(struct lba_range *range, void *ctx, int status)
2160 {
2161 	g_lock_lba_range_done = true;
2162 }
2163 
2164 static void
2165 unlock_lba_range_done(struct lba_range *range, void *ctx, int status)
2166 {
2167 	g_unlock_lba_range_done = true;
2168 }
2169 
2170 static uint32_t
2171 stub_channel_outstanding_cnt(void *io_target)
2172 {
2173 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
2174 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
2175 	uint32_t outstanding_cnt;
2176 
2177 	outstanding_cnt = ch->outstanding_cnt;
2178 
2179 	spdk_put_io_channel(_ch);
2180 	return outstanding_cnt;
2181 }
2182 
2183 static void
2184 lock_lba_range_then_submit_io(void)
2185 {
2186 	struct spdk_bdev_desc *desc = NULL;
2187 	void *io_target;
2188 	struct spdk_io_channel *io_ch[3];
2189 	struct spdk_bdev_channel *bdev_ch[3];
2190 	struct lba_range *range;
2191 	char buf[4096];
2192 	int ctx0, ctx1, ctx2;
2193 	int rc;
2194 
2195 	setup_test();
2196 
2197 	io_target = g_bdev.io_target;
2198 	desc = g_desc;
2199 
2200 	set_thread(0);
2201 	io_ch[0] = spdk_bdev_get_io_channel(desc);
2202 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
2203 	CU_ASSERT(io_ch[0] != NULL);
2204 
2205 	set_thread(1);
2206 	io_ch[1] = spdk_bdev_get_io_channel(desc);
2207 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
2208 	CU_ASSERT(io_ch[1] != NULL);
2209 
2210 	set_thread(0);
2211 	g_lock_lba_range_done = false;
2212 	rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
2213 	CU_ASSERT(rc == 0);
2214 	poll_threads();
2215 
2216 	/* The lock should immediately become valid, since there are no outstanding
2217 	 * write I/O.
2218 	 */
2219 	CU_ASSERT(g_lock_lba_range_done == true);
2220 	range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
2221 	SPDK_CU_ASSERT_FATAL(range != NULL);
2222 	CU_ASSERT(range->offset == 20);
2223 	CU_ASSERT(range->length == 10);
2224 	CU_ASSERT(range->owner_ch == bdev_ch[0]);
2225 
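	/* This read is issued on the channel that owns the lock, so it is submitted to the stub immediately. */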
2226 	g_io_done = false;
2227 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2228 	rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2229 	CU_ASSERT(rc == 0);
2230 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2231 
2232 	stub_complete_io(io_target, 1);
2233 	poll_threads();
2234 	CU_ASSERT(g_io_done == true);
2235 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2236 
2237 	/* Try a write I/O.  This should actually be allowed to execute, since the channel
2238 	 * holding the lock is submitting the write I/O.
2239 	 */
2240 	g_io_done = false;
2241 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2242 	rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2243 	CU_ASSERT(rc == 0);
2244 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2245 
2246 	stub_complete_io(io_target, 1);
2247 	poll_threads();
2248 	CU_ASSERT(g_io_done == true);
2249 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2250 
2251 	/* Try a write I/O.  This should get queued in the io_locked tailq. */
2252 	set_thread(1);
2253 	g_io_done = false;
2254 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2255 	rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
2256 	CU_ASSERT(rc == 0);
2257 	poll_threads();
2258 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2259 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2260 	CU_ASSERT(g_io_done == false);
2261 
2262 	/* Try to unlock the lba range using thread 1's io_ch.  This should fail. */
2263 	rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
2264 	CU_ASSERT(rc == -EINVAL);
2265 
2266 	/* Now create a new channel and submit a write I/O with it.  This should also be queued.
2267 	 * The new channel should inherit the active locks from the bdev's internal list.
2268 	 */
2269 	set_thread(2);
2270 	io_ch[2] = spdk_bdev_get_io_channel(desc);
2271 	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
2272 	CU_ASSERT(io_ch[2] != NULL);
2273 
2274 	g_io_done2 = false;
2275 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2276 	rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
2277 	CU_ASSERT(rc == 0);
2278 	poll_threads();
2279 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2280 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2281 	CU_ASSERT(g_io_done2 == false);
2282 
2283 	set_thread(0);
2284 	rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
2285 	CU_ASSERT(rc == 0);
2286 	poll_threads();
2287 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
2288 
2289 	/* The LBA range is unlocked, so the write IOs should now have started execution. */
2290 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2291 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2292 
2293 	set_thread(1);
2294 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2295 	stub_complete_io(io_target, 1);
2296 	set_thread(2);
2297 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2298 	stub_complete_io(io_target, 1);
2299 
2300 	poll_threads();
2301 	CU_ASSERT(g_io_done == true);
2302 	CU_ASSERT(g_io_done2 == true);
2303 
2304 	/* Tear down the channels */
2305 	set_thread(0);
2306 	spdk_put_io_channel(io_ch[0]);
2307 	set_thread(1);
2308 	spdk_put_io_channel(io_ch[1]);
2309 	set_thread(2);
2310 	spdk_put_io_channel(io_ch[2]);
2311 	poll_threads();
2312 	set_thread(0);
2313 	teardown_test();
2314 }
2315 
2316 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel().
2317  * spdk_bdev_unregister() calls spdk_io_device_unregister() in the end. However
2318  * spdk_io_device_unregister() fails if it is called while executing spdk_for_each_channel().
2319  * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset()
2320  * completes. Test this behavior.
2321  */
2322 static void
2323 unregister_during_reset(void)
2324 {
2325 	struct spdk_io_channel *io_ch[2];
2326 	bool done_reset = false, done_unregister = false;
2327 	int rc;
2328 
2329 	setup_test();
2330 	set_thread(0);
2331 
2332 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
2333 	SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL);
2334 
2335 	set_thread(1);
2336 
2337 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
2338 	SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL);
2339 
2340 	set_thread(0);
2341 
2342 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2343 
2344 	rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset);
2345 	CU_ASSERT(rc == 0);
2346 
2347 	set_thread(0);
2348 
2349 	poll_thread_times(0, 1);
2350 
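	/* Poll only once so the reset's spdk_for_each_channel() iteration is still in progress
	 * when the bdev is unregistered below.
	 */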
2351 	spdk_bdev_close(g_desc);
2352 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister);
2353 
2354 	CU_ASSERT(done_reset == false);
2355 	CU_ASSERT(done_unregister == false);
2356 
2357 	poll_threads();
2358 
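	/* Completing all outstanding stub I/O (a count of 0 means all) lets the reset finish. */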
2359 	stub_complete_io(g_bdev.io_target, 0);
2360 
2361 	poll_threads();
2362 
2363 	CU_ASSERT(done_reset == true);
2364 	CU_ASSERT(done_unregister == false);
2365 
2366 	spdk_put_io_channel(io_ch[0]);
2367 
2368 	set_thread(1);
2369 
2370 	spdk_put_io_channel(io_ch[1]);
2371 
2372 	poll_threads();
2373 
2374 	CU_ASSERT(done_unregister == true);
2375 
2376 	/* Restore the original g_bdev so that we can use teardown_test(). */
2377 	set_thread(0);
2378 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
2379 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2380 	teardown_test();
2381 }
2382 
2383 static void
2384 bdev_init_wt_cb(void *done, int rc)
2385 {
2386 }
2387 
2388 static int
2389 wrong_thread_setup(void)
2390 {
2391 	allocate_cores(1);
2392 	allocate_threads(2);
2393 	set_thread(0);
2394 
2395 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
2396 				ut_accel_ch_destroy_cb, 0, NULL);
2397 	spdk_bdev_initialize(bdev_init_wt_cb, NULL);
2398 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
2399 				sizeof(struct ut_bdev_channel), NULL);
2400 
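	/* Leave thread 1, a non-app thread, as the current thread for the tests in this suite. */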
2401 	set_thread(1);
2402 
2403 	return 0;
2404 }
2405 
2406 static int
2407 wrong_thread_teardown(void)
2408 {
2409 	int rc = 0;
2410 
2411 	set_thread(0);
2412 
2413 	g_teardown_done = false;
2414 	spdk_io_device_unregister(&g_io_device, NULL);
2415 	spdk_bdev_finish(finish_cb, NULL);
2416 	poll_threads();
2417 	memset(&g_bdev, 0, sizeof(g_bdev));
2418 	if (!g_teardown_done) {
2419 		fprintf(stderr, "%s:%d %s: teardown not done\n", __FILE__, __LINE__, __func__);
2420 		rc = -1;
2421 	}
2422 	g_teardown_done = false;
2423 
2424 	spdk_io_device_unregister(&g_accel_io_device, NULL);
2425 	free_threads();
2426 	free_cores();
2427 
2428 	return rc;
2429 }
2430 
2431 static void
2432 _bdev_unregistered_wt(void *ctx, int rc)
2433 {
2434 	struct spdk_thread **threadp = ctx;
2435 
2436 	*threadp = spdk_get_thread();
2437 }
2438 
2439 static void
2440 spdk_bdev_register_wt(void)
2441 {
2442 	struct spdk_bdev bdev = { 0 };
2443 	int rc;
2444 	struct spdk_thread *unreg_thread;
2445 
2446 	bdev.name = "wt_bdev";
2447 	bdev.fn_table = &fn_table;
2448 	bdev.module = &bdev_ut_if;
2449 	bdev.blocklen = 4096;
2450 	bdev.blockcnt = 1024;
2451 
2452 	/* Can register only on app thread */
2453 	rc = spdk_bdev_register(&bdev);
2454 	CU_ASSERT(rc == -EINVAL);
2455 
2456 	/* Can unregister on any thread */
2457 	set_thread(0);
2458 	rc = spdk_bdev_register(&bdev);
2459 	CU_ASSERT(rc == 0);
2460 	set_thread(1);
2461 	unreg_thread = NULL;
2462 	spdk_bdev_unregister(&bdev, _bdev_unregistered_wt, &unreg_thread);
2463 	poll_threads();
2464 	CU_ASSERT(unreg_thread == spdk_get_thread());
2465 
2466 	/* Can unregister by name on any thread */
2467 	set_thread(0);
2468 	rc = spdk_bdev_register(&bdev);
2469 	CU_ASSERT(rc == 0);
2470 	set_thread(1);
2471 	unreg_thread = NULL;
2472 	rc = spdk_bdev_unregister_by_name(bdev.name, bdev.module, _bdev_unregistered_wt,
2473 					  &unreg_thread);
2474 	CU_ASSERT(rc == 0);
2475 	poll_threads();
2476 	CU_ASSERT(unreg_thread == spdk_get_thread());
2477 }
2478 
2479 static void
2480 wait_for_examine_cb(void *arg)
2481 {
2482 	struct spdk_thread **thread = arg;
2483 
2484 	*thread = spdk_get_thread();
2485 }
2486 
2487 static void
2488 spdk_bdev_examine_wt(void)
2489 {
2490 	int rc;
2491 	bool save_auto_examine = g_bdev_opts.bdev_auto_examine;
2492 	struct spdk_thread *thread;
2493 
2494 	g_bdev_opts.bdev_auto_examine = false;
2495 
2496 	set_thread(0);
2497 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2498 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2499 	set_thread(1);
2500 
2501 	/* Can examine only on the app thread */
2502 	rc = spdk_bdev_examine("ut_bdev_wt");
2503 	CU_ASSERT(rc == -EINVAL);
2504 	unregister_bdev(&g_bdev);
2505 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2506 
2507 	/* Can wait for examine on app thread, callback called on app thread. */
2508 	set_thread(0);
2509 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2510 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2511 	thread = NULL;
2512 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2513 	CU_ASSERT(rc == 0);
2514 	poll_threads();
2515 	CU_ASSERT(thread == spdk_get_thread());
2516 	unregister_bdev(&g_bdev);
2517 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2518 
2519 	/* Can wait for examine on non-app thread, callback called on same thread. */
2520 	set_thread(0);
2521 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2522 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
	set_thread(1);
2523 	thread = NULL;
2524 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2525 	CU_ASSERT(rc == 0);
2526 	poll_threads();
2527 	CU_ASSERT(thread == spdk_get_thread());
2528 	unregister_bdev(&g_bdev);
2529 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2530 
2532 	g_bdev_opts.bdev_auto_examine = save_auto_examine;
2533 }
2534 
2535 static void
2536 event_notify_and_close(void)
2537 {
2538 	int resize_notify_count = 0;
2539 	struct spdk_bdev_desc *desc = NULL;
2540 	struct spdk_bdev *bdev;
2541 	int rc;
2542 
2543 	setup_test();
2544 	set_thread(0);
2545 
2546 	/* setup_test() automatically opens the bdev, but this test needs to do
2547 	 * that in a different way. */
2548 	spdk_bdev_close(g_desc);
2549 	poll_threads();
2550 
2551 	set_thread(1);
2552 
2553 	rc = spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &resize_notify_count, &desc);
2554 	CU_ASSERT(rc == 0);
2555 	SPDK_CU_ASSERT_FATAL(desc != NULL);
2556 
2557 	bdev = spdk_bdev_desc_get_bdev(desc);
2558 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2559 
2560 	/* Test a normal case that a resize event is notified. */
2561 	set_thread(0);
2562 
2563 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 2);
2564 	CU_ASSERT(rc == 0);
2565 	CU_ASSERT(bdev->blockcnt == 1024 * 2);
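	/* The event_notify message to the desc's thread is still in flight: the desc holds an
	 * extra reference and the resize callback has not run yet.
	 */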
2566 	CU_ASSERT(desc->refs == 1);
2567 	CU_ASSERT(resize_notify_count == 0);
2568 
2569 	poll_threads();
2570 
2571 	CU_ASSERT(desc->refs == 0);
2572 	CU_ASSERT(resize_notify_count == 1);
2573 
2574 	/* Test a more complex case: if the desc is closed after two event_notify messages
2575 	 * have been sent, both messages are discarded and the desc is freed.
2576 	 */
2577 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 3);
2578 	CU_ASSERT(rc == 0);
2579 	CU_ASSERT(bdev->blockcnt == 1024 * 3);
2580 	CU_ASSERT(desc->refs == 1);
2581 	CU_ASSERT(resize_notify_count == 1);
2582 
2583 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 4);
2584 	CU_ASSERT(rc == 0);
2585 	CU_ASSERT(bdev->blockcnt == 1024 * 4);
2586 	CU_ASSERT(desc->refs == 2);
2587 	CU_ASSERT(resize_notify_count == 1);
2588 
2589 	set_thread(1);
2590 
2591 	spdk_bdev_close(desc);
2592 	CU_ASSERT(desc->closed == true);
2593 	CU_ASSERT(desc->refs == 2);
2594 	CU_ASSERT(resize_notify_count == 1);
2595 
2596 	poll_threads();
2597 
2598 	CU_ASSERT(resize_notify_count == 1);
2599 
2600 	set_thread(0);
2601 
2602 	/* Restore g_desc. Then, we can execute teardown_test(). */
2603 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2604 	teardown_test();
2605 }
2606 
2607 int
2608 main(int argc, char **argv)
2609 {
2610 	CU_pSuite	suite = NULL;
2611 	CU_pSuite	suite_wt = NULL;
2612 	unsigned int	num_failures;
2613 
2614 	CU_initialize_registry();
2615 
2616 	suite = CU_add_suite("bdev", NULL, NULL);
2617 	suite_wt = CU_add_suite("bdev_wrong_thread", wrong_thread_setup, wrong_thread_teardown);
2618 
2619 	CU_ADD_TEST(suite, basic);
2620 	CU_ADD_TEST(suite, unregister_and_close);
2621 	CU_ADD_TEST(suite, unregister_and_close_different_threads);
2622 	CU_ADD_TEST(suite, basic_qos);
2623 	CU_ADD_TEST(suite, put_channel_during_reset);
2624 	CU_ADD_TEST(suite, aborted_reset);
2625 	CU_ADD_TEST(suite, aborted_reset_no_outstanding_io);
2626 	CU_ADD_TEST(suite, io_during_reset);
2627 	CU_ADD_TEST(suite, reset_completions);
2628 	CU_ADD_TEST(suite, io_during_qos_queue);
2629 	CU_ADD_TEST(suite, io_during_qos_reset);
2630 	CU_ADD_TEST(suite, enomem);
2631 	CU_ADD_TEST(suite, enomem_multi_bdev);
2632 	CU_ADD_TEST(suite, enomem_multi_bdev_unregister);
2633 	CU_ADD_TEST(suite, enomem_multi_io_target);
2634 	CU_ADD_TEST(suite, qos_dynamic_enable);
2635 	CU_ADD_TEST(suite, bdev_histograms_mt);
2636 	CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
2637 	CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
2638 	CU_ADD_TEST(suite, unregister_during_reset);
2639 	CU_ADD_TEST(suite_wt, spdk_bdev_register_wt);
2640 	CU_ADD_TEST(suite_wt, spdk_bdev_examine_wt);
2641 	CU_ADD_TEST(suite, event_notify_and_close);
2642 
2643 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2644 	CU_cleanup_registry();
2645 	return num_failures;
2646 }
2647