xref: /spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c (revision 22c0e978842808113ce4ac166a33734c01d2ce61)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk_cunit.h"
8 
9 #include "common/lib/ut_multithread.c"
10 #include "unit/lib/json_mock.c"
11 
12 #include "spdk/config.h"
13 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
14 #undef SPDK_CONFIG_VTUNE
15 
16 #include "bdev/bdev.c"
17 
18 #define BDEV_UT_NUM_THREADS 3
19 
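/*
 * Stub out the external symbols referenced by bdev.c (notify, SCSI/NVMe
 * translation, memory domains and the accel framework) so this unit test can
 * link without the corresponding SPDK libraries.
 */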
20 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
21 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
22 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk,
23 		int *asc, int *ascq));
24 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
25 	    "test_domain");
26 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
27 	    (struct spdk_memory_domain *domain), 0);
28 DEFINE_STUB(spdk_accel_sequence_finish, int,
29 	    (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
30 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
31 DEFINE_STUB(spdk_accel_append_copy, int,
32 	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, struct iovec *dst_iovs,
33 	     uint32_t dst_iovcnt, struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
34 	     struct iovec *src_iovs, uint32_t src_iovcnt, struct spdk_memory_domain *src_domain,
35 	     void *src_domain_ctx, int flags, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
36 DEFINE_STUB(spdk_accel_get_memory_domain, struct spdk_memory_domain *, (void), NULL);
37 
38 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
39 int
40 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
41 			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
42 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
43 {
44 	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
45 
46 	cpl_cb(cpl_cb_arg, 0);
47 	return 0;
48 }
49 
50 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
51 int
52 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
53 			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
54 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
55 {
56 	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
57 
58 	cpl_cb(cpl_cb_arg, 0);
59 	return 0;
60 }
61 
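/*
 * Provide the accel I/O channel requested by bdev.c, backed by a dummy
 * io_device that is registered in setup_test().
 */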
62 static int g_accel_io_device;
63 
64 struct spdk_io_channel *
65 spdk_accel_get_io_channel(void)
66 {
67 	return spdk_get_io_channel(&g_accel_io_device);
68 }
69 
70 struct ut_bdev {
71 	struct spdk_bdev	bdev;
72 	void			*io_target;
73 };
74 
75 struct ut_bdev_channel {
76 	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
77 	uint32_t			outstanding_cnt;
78 	uint32_t			avail_cnt;
79 };
80 
81 int g_io_device;
82 struct ut_bdev g_bdev;
83 struct spdk_bdev_desc *g_desc;
84 bool g_teardown_done = false;
85 bool g_get_io_channel = true;
86 bool g_create_ch = true;
87 bool g_init_complete_called = false;
88 bool g_fini_start_called = true;
89 int g_status = 0;
90 int g_count = 0;
91 struct spdk_histogram_data *g_histogram = NULL;
92 
93 static int
94 ut_accel_ch_create_cb(void *io_device, void *ctx)
95 {
96 	return 0;
97 }
98 
99 static void
100 ut_accel_ch_destroy_cb(void *io_device, void *ctx)
101 {
102 }
103 
104 static int
105 stub_create_ch(void *io_device, void *ctx_buf)
106 {
107 	struct ut_bdev_channel *ch = ctx_buf;
108 
109 	if (g_create_ch == false) {
110 		return -1;
111 	}
112 
113 	TAILQ_INIT(&ch->outstanding_io);
114 	ch->outstanding_cnt = 0;
115 	/*
116 	 * When avail_cnt gets to 0, the submit_request function will return ENOMEM.
117 	 *  Most tests do not want ENOMEM to occur, so by default set this to a
118 	 *  big value that won't get hit.  The ENOMEM tests can then override this
119 	 *  value to something much smaller to induce ENOMEM conditions.
120 	 */
121 	ch->avail_cnt = 2048;
122 	return 0;
123 }
124 
125 static void
126 stub_destroy_ch(void *io_device, void *ctx_buf)
127 {
128 }
129 
130 static struct spdk_io_channel *
131 stub_get_io_channel(void *ctx)
132 {
133 	struct ut_bdev *ut_bdev = ctx;
134 
135 	if (g_get_io_channel == true) {
136 		return spdk_get_io_channel(ut_bdev->io_target);
137 	} else {
138 		return NULL;
139 	}
140 }
141 
142 static int
143 stub_destruct(void *ctx)
144 {
145 	return 0;
146 }
147 
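/*
 * Stub bdev module submit path.  A RESET first aborts every outstanding I/O on
 * the channel and is then queued like any other I/O.  An ABORT completes the
 * target I/O as aborted and succeeds, or fails if the target is not found.
 * All other I/O is queued on outstanding_io while avail_cnt lasts and is
 * completed with NOMEM once it is exhausted; tests drain the queue with
 * stub_complete_io().
 */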
148 static void
149 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
150 {
151 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
152 	struct spdk_bdev_io *io;
153 
154 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
155 		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
156 			io = TAILQ_FIRST(&ch->outstanding_io);
157 			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
158 			ch->outstanding_cnt--;
159 			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
160 			ch->avail_cnt++;
161 		}
162 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
163 		TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
164 			if (io == bdev_io->u.abort.bio_to_abort) {
165 				TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
166 				ch->outstanding_cnt--;
167 				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
168 				ch->avail_cnt++;
169 
170 				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
171 				return;
172 			}
173 		}
174 
175 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
176 		return;
177 	}
178 
179 	if (ch->avail_cnt > 0) {
180 		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
181 		ch->outstanding_cnt++;
182 		ch->avail_cnt--;
183 	} else {
184 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
185 	}
186 }
187 
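/*
 * Complete up to num_to_complete I/O queued on the stub channel for io_target
 * with SUCCESS status; num_to_complete == 0 means complete everything.
 */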
188 static uint32_t
189 stub_complete_io(void *io_target, uint32_t num_to_complete)
190 {
191 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
192 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
193 	struct spdk_bdev_io *io;
194 	bool complete_all = (num_to_complete == 0);
195 	uint32_t num_completed = 0;
196 
197 	while (complete_all || num_completed < num_to_complete) {
198 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
199 			break;
200 		}
201 		io = TAILQ_FIRST(&ch->outstanding_io);
202 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
203 		ch->outstanding_cnt--;
204 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
205 		ch->avail_cnt++;
206 		num_completed++;
207 	}
208 	spdk_put_io_channel(_ch);
209 	return num_completed;
210 }
211 
212 static bool
213 stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
214 {
215 	return true;
216 }
217 
218 static struct spdk_bdev_fn_table fn_table = {
219 	.get_io_channel =	stub_get_io_channel,
220 	.destruct =		stub_destruct,
221 	.submit_request =	stub_submit_request,
222 	.io_type_supported =	stub_io_type_supported,
223 };
224 
225 struct spdk_bdev_module bdev_ut_if;
226 
227 static int
228 module_init(void)
229 {
230 	spdk_bdev_module_init_done(&bdev_ut_if);
231 	return 0;
232 }
233 
234 static void
235 module_fini(void)
236 {
237 }
238 
239 static void
240 init_complete(void)
241 {
242 	g_init_complete_called = true;
243 }
244 
245 static void
246 fini_start(void)
247 {
248 	g_fini_start_called = true;
249 }
250 
251 struct spdk_bdev_module bdev_ut_if = {
252 	.name = "bdev_ut",
253 	.module_init = module_init,
254 	.module_fini = module_fini,
255 	.async_init = true,
256 	.init_complete = init_complete,
257 	.fini_start = fini_start,
258 };
259 
260 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
261 
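/*
 * Initialize a ut_bdev (4K block size, 1024 blocks) on top of the given
 * io_target and register it with the bdev layer.
 */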
262 static void
263 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
264 {
265 	memset(ut_bdev, 0, sizeof(*ut_bdev));
266 
267 	ut_bdev->io_target = io_target;
268 	ut_bdev->bdev.ctxt = ut_bdev;
269 	ut_bdev->bdev.name = name;
270 	ut_bdev->bdev.fn_table = &fn_table;
271 	ut_bdev->bdev.module = &bdev_ut_if;
272 	ut_bdev->bdev.blocklen = 4096;
273 	ut_bdev->bdev.blockcnt = 1024;
274 
275 	spdk_bdev_register(&ut_bdev->bdev);
276 }
277 
278 static void
279 unregister_bdev(struct ut_bdev *ut_bdev)
280 {
281 	/* Handle any deferred messages. */
282 	poll_threads();
283 	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
284 	/* Handle the async bdev unregister. */
285 	poll_threads();
286 }
287 
288 static void
289 bdev_init_cb(void *done, int rc)
290 {
291 	CU_ASSERT(rc == 0);
292 	*(bool *)done = true;
293 }
294 
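/*
 * Event callback used when opening descriptors in these tests: records REMOVE
 * events in a bool and counts RESIZE events in an int, both passed via event_ctx.
 */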
295 static void
296 _bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
297 	       void *event_ctx)
298 {
299 	switch (type) {
300 	case SPDK_BDEV_EVENT_REMOVE:
301 		if (event_ctx != NULL) {
302 			*(bool *)event_ctx = true;
303 		}
304 		break;
305 	case SPDK_BDEV_EVENT_RESIZE:
306 		if (event_ctx != NULL) {
307 			*(int *)event_ctx += 1;
308 		}
309 		break;
310 	default:
311 		CU_ASSERT(false);
312 		break;
313 	}
314 }
315 
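/*
 * Common test setup: allocate the unit-test threads, initialize the iobuf and
 * bdev layers, register the stub and accel io_devices, register ut_bdev and
 * open a descriptor to it in g_desc.
 */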
316 static void
317 setup_test(void)
318 {
319 	bool done = false;
320 	int rc;
321 
322 	allocate_cores(BDEV_UT_NUM_THREADS);
323 	allocate_threads(BDEV_UT_NUM_THREADS);
324 	set_thread(0);
325 
326 	rc = spdk_iobuf_initialize();
327 	CU_ASSERT(rc == 0);
328 	spdk_bdev_initialize(bdev_init_cb, &done);
329 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
330 				sizeof(struct ut_bdev_channel), NULL);
331 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
332 				ut_accel_ch_destroy_cb, 0, NULL);
333 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
334 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
335 }
336 
337 static void
338 finish_cb(void *cb_arg)
339 {
340 	g_teardown_done = true;
341 }
342 
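/*
 * Undo setup_test(): close g_desc, unregister ut_bdev and the stub io_devices,
 * shut down the bdev and iobuf layers, and free the test threads.
 */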
343 static void
344 teardown_test(void)
345 {
346 	set_thread(0);
347 	g_teardown_done = false;
348 	spdk_bdev_close(g_desc);
349 	g_desc = NULL;
350 	unregister_bdev(&g_bdev);
351 	spdk_io_device_unregister(&g_io_device, NULL);
352 	spdk_bdev_finish(finish_cb, NULL);
353 	spdk_io_device_unregister(&g_accel_io_device, NULL);
354 	spdk_iobuf_finish(finish_cb, NULL);
355 	poll_threads();
356 	memset(&g_bdev, 0, sizeof(g_bdev));
357 	CU_ASSERT(g_teardown_done == true);
358 	g_teardown_done = false;
359 	free_threads();
360 	free_cores();
361 }
362 
363 static uint32_t
364 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
365 {
366 	struct spdk_bdev_io *io;
367 	uint32_t cnt = 0;
368 
369 	TAILQ_FOREACH(io, tailq, internal.link) {
370 		cnt++;
371 	}
372 
373 	return cnt;
374 }
375 
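/*
 * Verify that the init_complete/fini_start module callbacks fire and that
 * spdk_bdev_get_io_channel() fails cleanly when the module refuses to provide
 * a channel or when channel creation fails.
 */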
376 static void
377 basic(void)
378 {
379 	g_init_complete_called = false;
380 	setup_test();
381 	CU_ASSERT(g_init_complete_called == true);
382 
383 	set_thread(0);
384 
385 	g_get_io_channel = false;
386 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
387 	CU_ASSERT(g_ut_threads[0].ch == NULL);
388 
389 	g_get_io_channel = true;
390 	g_create_ch = false;
391 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
392 	CU_ASSERT(g_ut_threads[0].ch == NULL);
393 
394 	g_get_io_channel = true;
395 	g_create_ch = true;
396 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
397 	CU_ASSERT(g_ut_threads[0].ch != NULL);
398 	spdk_put_io_channel(g_ut_threads[0].ch);
399 
400 	g_fini_start_called = false;
401 	teardown_test();
402 	CU_ASSERT(g_fini_start_called == true);
403 }
404 
405 static void
406 _bdev_unregistered(void *done, int rc)
407 {
408 	CU_ASSERT(rc == 0);
409 	*(bool *)done = true;
410 }
411 
412 static void
413 unregister_and_close(void)
414 {
415 	bool done, remove_notify;
416 	struct spdk_bdev_desc *desc = NULL;
417 
418 	setup_test();
419 	set_thread(0);
420 
421 	/* setup_test() automatically opens the bdev,
422 	 * but this test needs to do that in a different
423 	 * way. */
424 	spdk_bdev_close(g_desc);
425 	poll_threads();
426 
427 	/* Try hotremoving a bdev with descriptors which don't provide
428 	 * any context to the notification callback */
429 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
430 	SPDK_CU_ASSERT_FATAL(desc != NULL);
431 
432 	/* There is an open descriptor on the device. Unregister it,
433 	 * which can't proceed until the descriptor is closed. */
434 	done = false;
435 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
436 
437 	/* Poll the threads to allow all events to be processed */
438 	poll_threads();
439 
440 	/* Make sure the bdev was not unregistered. We still have a
441 	 * descriptor open */
442 	CU_ASSERT(done == false);
443 
444 	spdk_bdev_close(desc);
445 	poll_threads();
446 	desc = NULL;
447 
448 	/* The unregister should have completed */
449 	CU_ASSERT(done == true);
450 
451 
452 	/* Register the bdev again */
453 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
454 
455 	remove_notify = false;
456 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
457 	SPDK_CU_ASSERT_FATAL(desc != NULL);
458 	CU_ASSERT(remove_notify == false);
459 
460 	/* There is an open descriptor on the device. Unregister it,
461 	 * which can't proceed until the descriptor is closed. */
462 	done = false;
463 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
464 	/* No polling has occurred, so neither of these should execute */
465 	CU_ASSERT(remove_notify == false);
466 	CU_ASSERT(done == false);
467 
468 	/* Prior to the unregister completing, close the descriptor */
469 	spdk_bdev_close(desc);
470 
471 	/* Poll the threads to allow all events to be processed */
472 	poll_threads();
473 
474 	/* Remove notify should not have been called because the
475 	 * descriptor is already closed. */
476 	CU_ASSERT(remove_notify == false);
477 
478 	/* The unregister should have completed */
479 	CU_ASSERT(done == true);
480 
481 	/* Restore the original g_bdev so that we can use teardown_test(). */
482 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
483 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
484 	teardown_test();
485 }
486 
487 static void
488 unregister_and_close_different_threads(void)
489 {
490 	bool done;
491 	struct spdk_bdev_desc *desc = NULL;
492 
493 	setup_test();
494 	set_thread(0);
495 
496 	/* setup_test() automatically opens the bdev,
497 	 * but this test needs to do that in a different
498 	 * way. */
499 	spdk_bdev_close(g_desc);
500 	poll_threads();
501 
502 	set_thread(1);
503 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
504 	SPDK_CU_ASSERT_FATAL(desc != NULL);
505 	done = false;
506 
507 	set_thread(0);
508 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
509 
510 	/* Poll the threads to allow all events to be processed */
511 	poll_threads();
512 
513 	/* Make sure the bdev was not unregistered. We still have a
514 	 * descriptor open */
515 	CU_ASSERT(done == false);
516 
517 	/* Close the descriptor on thread 1.  Poll the thread and confirm the
518 	 * unregister did not complete, since the unregister was initiated on thread 0
519 	 */
520 	set_thread(1);
521 	spdk_bdev_close(desc);
522 	poll_thread(1);
523 	CU_ASSERT(done == false);
524 
525 	/* Now poll thread 0 and confirm the unregister completed. */
526 	set_thread(0);
527 	poll_thread(0);
528 	CU_ASSERT(done == true);
529 
530 	/* Restore the original g_bdev so that we can use teardown_test(). */
531 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
532 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
533 	teardown_test();
534 }
535 
536 static void
537 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
538 {
539 	bool *done = cb_arg;
540 
541 	CU_ASSERT(success == true);
542 	*done = true;
543 	spdk_bdev_free_io(bdev_io);
544 }
545 
546 static void
547 put_channel_during_reset(void)
548 {
549 	struct spdk_io_channel *io_ch;
550 	bool done = false;
551 
552 	setup_test();
553 
554 	set_thread(0);
555 	io_ch = spdk_bdev_get_io_channel(g_desc);
556 	CU_ASSERT(io_ch != NULL);
557 
558 	/*
559 	 * Start a reset, but then put the I/O channel before
560 	 *  the deferred messages for the reset get a chance to
561 	 *  execute.
562 	 */
563 	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
564 	spdk_put_io_channel(io_ch);
565 	poll_threads();
566 	stub_complete_io(g_bdev.io_target, 0);
567 
568 	teardown_test();
569 }
570 
571 static void
572 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
573 {
574 	enum spdk_bdev_io_status *status = cb_arg;
575 
576 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
577 	spdk_bdev_free_io(bdev_io);
578 }
579 
580 static void io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);
581 
582 static void
583 aborted_reset(void)
584 {
585 	struct spdk_io_channel *io_ch[2];
586 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
587 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
588 
589 	setup_test();
590 
591 	set_thread(0);
592 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
593 	CU_ASSERT(io_ch[0] != NULL);
594 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
595 	poll_threads();
596 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
597 
598 	/*
599 	 * First reset has been submitted on ch0.  Now submit a second
600 	 *  reset on ch1 which will get queued since there is already a
601 	 *  reset in progress.
602 	 */
603 	set_thread(1);
604 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
605 	CU_ASSERT(io_ch[1] != NULL);
606 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
607 	poll_threads();
608 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
609 
610 	/*
611 	 * Now destroy ch1.  This will abort the queued reset.  Check that
612 	 *  the second reset was completed with failed status.  Also check
613 	 *  that bdev->internal.reset_in_progress != NULL, since the
614 	 *  original reset has not been completed yet.  This ensures that
615 	 *  the bdev code is correctly noticing that the failed reset is
616 	 *  *not* the one that had been submitted to the bdev module.
617 	 */
618 	set_thread(1);
619 	spdk_put_io_channel(io_ch[1]);
620 	poll_threads();
621 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
622 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
623 
624 	/*
625 	 * Now complete the first reset, verify that it completed with SUCCESS
626 	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
627 	 */
628 	set_thread(0);
629 	spdk_put_io_channel(io_ch[0]);
630 	stub_complete_io(g_bdev.io_target, 0);
631 	poll_threads();
632 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
633 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
634 
635 	teardown_test();
636 }
637 
638 static void
639 aborted_reset_no_outstanding_io(void)
640 {
641 	struct spdk_io_channel *io_ch[2];
642 	struct spdk_bdev_channel *bdev_ch[2];
643 	struct spdk_bdev *bdev[2];
644 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
645 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
646 
647 	setup_test();
648 
649 	/*
650 	 * This time we test the reset without any outstanding IO
651 	 * present on the bdev channel, so both resets should finish
652 	 * immediately.
653 	 */
654 
655 	set_thread(0);
656 	/* Set reset_io_drain_timeout so the bdev layer handles the reset itself;
657 	 * with no outstanding IO it completes immediately. */
658 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
659 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
660 	bdev[0] = bdev_ch[0]->bdev;
661 	bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
662 	CU_ASSERT(io_ch[0] != NULL);
663 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
664 	poll_threads();
665 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
666 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
667 	spdk_put_io_channel(io_ch[0]);
668 
669 	set_thread(1);
670 	/* Set reset_io_drain_timeout so the bdev layer handles the reset itself;
671 	 * with no outstanding IO it completes immediately. */
672 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
673 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
674 	bdev[1] = bdev_ch[1]->bdev;
675 	bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
676 	CU_ASSERT(io_ch[1] != NULL);
677 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
678 	poll_threads();
679 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
680 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
681 	spdk_put_io_channel(io_ch[1]);
682 
683 	stub_complete_io(g_bdev.io_target, 0);
684 	poll_threads();
685 
686 	teardown_test();
687 }
688 
689 
690 static void
691 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
692 {
693 	enum spdk_bdev_io_status *status = cb_arg;
694 
695 	*status = bdev_io->internal.status;
696 	spdk_bdev_free_io(bdev_io);
697 }
698 
699 static void
700 io_during_reset(void)
701 {
702 	struct spdk_io_channel *io_ch[2];
703 	struct spdk_bdev_channel *bdev_ch[2];
704 	enum spdk_bdev_io_status status0, status1, status_reset;
705 	int rc;
706 
707 	setup_test();
708 
709 	/*
710 	 * First test normal case - submit an I/O on each of two channels (with no resets)
711 	 *  and verify they complete successfully.
712 	 */
713 	set_thread(0);
714 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
715 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
716 	CU_ASSERT(bdev_ch[0]->flags == 0);
717 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
718 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
719 	CU_ASSERT(rc == 0);
720 
721 	set_thread(1);
722 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
723 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
724 	CU_ASSERT(bdev_ch[1]->flags == 0);
725 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
726 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
727 	CU_ASSERT(rc == 0);
728 
729 	poll_threads();
730 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
731 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
732 
733 	set_thread(0);
734 	stub_complete_io(g_bdev.io_target, 0);
735 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
736 
737 	set_thread(1);
738 	stub_complete_io(g_bdev.io_target, 0);
739 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
740 
741 	/*
742 	 * Now submit a reset, and leave it pending while we submit I/O on two different
743 	 *  channels.  These I/O should be aborted by the bdev layer since the reset is in
744 	 *  progress.
745 	 */
746 	set_thread(0);
747 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
748 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
749 	CU_ASSERT(rc == 0);
750 
751 	CU_ASSERT(bdev_ch[0]->flags == 0);
752 	CU_ASSERT(bdev_ch[1]->flags == 0);
753 	poll_threads();
754 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
755 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
756 
757 	set_thread(0);
758 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
759 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
760 	CU_ASSERT(rc == 0);
761 
762 	set_thread(1);
763 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
764 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
765 	CU_ASSERT(rc == 0);
766 
767 	/*
768 	 * A reset is in progress so these read I/O should complete with status ABORTED.  Note that we
769 	 *  need to poll_threads() since I/O completed inline have their completion deferred.
770 	 */
771 	poll_threads();
772 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
773 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
774 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
775 
776 	/*
777 	 * Complete the reset
778 	 */
779 	set_thread(0);
780 	stub_complete_io(g_bdev.io_target, 0);
781 
782 	/*
783 	 * Only poll thread 0. We should not get a completion.
784 	 */
785 	poll_thread(0);
786 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
787 
788 	/*
789 	 * Poll both thread 0 and 1 so the messages can propagate and we
790 	 * get a completion.
791 	 */
792 	poll_threads();
793 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
794 
795 	spdk_put_io_channel(io_ch[0]);
796 	set_thread(1);
797 	spdk_put_io_channel(io_ch[1]);
798 	poll_threads();
799 
800 	teardown_test();
801 }
802 
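/* Count the RESET I/O currently queued in the stub module channel for io_target. */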
803 static uint32_t
804 count_queued_resets(void *io_target)
805 {
806 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
807 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
808 	struct spdk_bdev_io *io;
809 	uint32_t submitted_resets = 0;
810 
811 	TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
812 		if (io->type == SPDK_BDEV_IO_TYPE_RESET) {
813 			submitted_resets++;
814 		}
815 	}
816 
817 	spdk_put_io_channel(_ch);
818 
819 	return submitted_resets;
820 }
821 
822 static void
823 reset_completions(void)
824 {
825 	struct spdk_io_channel *io_ch;
826 	struct spdk_bdev_channel *bdev_ch;
827 	struct spdk_bdev *bdev;
828 	enum spdk_bdev_io_status status0, status_reset;
829 	int rc, iter;
830 
831 	setup_test();
832 
833 	/* This test covers four test cases:
834 	 * 1) reset_io_drain_timeout of a bdev is set to 0
835 	 * 2) No outstanding IO are present on any bdev channel
836 	 * 3) Outstanding IO finish during bdev reset
837 	 * 4) Outstanding IO do not finish before reset is done waiting
838 	 *    for them.
839 	 *
840 	 * Above conditions mainly affect the timing of bdev reset completion
841 	 * and whether a reset should be skipped via spdk_bdev_io_complete()
842 	 * or sent down to the underlying bdev module via bdev_io_submit_reset(). */
843 
844 	/* Test preparation */
845 	set_thread(0);
846 	io_ch = spdk_bdev_get_io_channel(g_desc);
847 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
848 	CU_ASSERT(bdev_ch->flags == 0);
849 
850 
851 	/* Test case 1) reset_io_drain_timeout set to 0. Reset should be sent down immediately. */
852 	bdev = &g_bdev.bdev;
853 	bdev->reset_io_drain_timeout = 0;
854 
855 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
856 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
857 	CU_ASSERT(rc == 0);
858 	poll_threads();
859 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
860 
861 	/* Call reset completion inside bdev module. */
862 	stub_complete_io(g_bdev.io_target, 0);
863 	poll_threads();
864 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
865 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
866 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
867 
868 
869 	/* Test case 2) no outstanding IO are present. Reset should perform one iteration over
870 	* channels and then be skipped. */
871 	bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
872 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
873 
874 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
875 	CU_ASSERT(rc == 0);
876 	poll_threads();
877 	/* Reset was never submitted to the bdev module. */
878 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
879 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
880 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
881 
882 
883 	/* Test case 3) outstanding IO finish during the bdev reset procedure. The reset should
884 	 * start a poller that checks for IO completions every second until reset_io_drain_timeout
885 	 * is reached, but here it finishes earlier than that threshold. */
886 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
887 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
888 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
889 	CU_ASSERT(rc == 0);
890 
891 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
892 	CU_ASSERT(rc == 0);
893 	poll_threads();
894 	/* The reset just started and should not have been submitted yet. */
895 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
896 
897 	poll_threads();
898 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
899 	/* Let the poller wait for about half the timeout, then complete the outstanding IO. */
900 	for (iter = 0; iter < 2; iter++) {
901 		/* Reset is still processing and not submitted at this point. */
902 		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
903 		spdk_delay_us(1000 * 1000);
904 		poll_threads();
905 		poll_threads();
906 	}
907 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
908 	stub_complete_io(g_bdev.io_target, 0);
909 	poll_threads();
910 	spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
911 	poll_threads();
912 	poll_threads();
913 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
914 	/* Sending reset to the bdev module has been skipped. */
915 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
916 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
917 
918 
919 	/* Test case 4) outstanding IO are still present after reset_io_drain_timeout
920 	* seconds have passed. */
921 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
922 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
923 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
924 	CU_ASSERT(rc == 0);
925 
926 	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
927 	CU_ASSERT(rc == 0);
928 	poll_threads();
929 	/* The reset just started and should not have been submitted yet. */
930 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
931 
932 	poll_threads();
933 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
934 	/* Let the poller wait for reset_io_drain_timeout seconds. */
935 	for (iter = 0; iter < bdev->reset_io_drain_timeout; iter++) {
936 		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
937 		spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
938 		poll_threads();
939 		poll_threads();
940 	}
941 
942 	/* After timing out, the reset should have been sent to the module. */
943 	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
944 	/* Complete reset submitted to the module and the read IO. */
945 	stub_complete_io(g_bdev.io_target, 0);
946 	poll_threads();
947 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
948 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
949 
950 
951 	/* Destroy the channel and end the test. */
952 	spdk_put_io_channel(io_ch);
953 	poll_threads();
954 
955 	teardown_test();
956 }
957 
958 
959 static void
960 basic_qos(void)
961 {
962 	struct spdk_io_channel *io_ch[2];
963 	struct spdk_bdev_channel *bdev_ch[2];
964 	struct spdk_bdev *bdev;
965 	enum spdk_bdev_io_status status, abort_status;
966 	int rc;
967 
968 	setup_test();
969 
970 	/* Enable QoS */
971 	bdev = &g_bdev.bdev;
972 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
973 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
974 	TAILQ_INIT(&bdev->internal.qos->queued);
975 	/*
976 	 * Enable read/write IOPS, read only byte per second and
977 	 * read/write byte per second rate limits.
978 	 * In this case, all rate limits will take equal effect.
979 	 */
980 	/* 2000 read/write I/O per second, or 2 per millisecond */
981 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
982 	/* 8K read/write byte per millisecond with 4K block size */
983 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
984 	/* 8K read only byte per millisecond with 4K block size */
985 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
986 
987 	g_get_io_channel = true;
988 
989 	set_thread(0);
990 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
991 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
992 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
993 
994 	set_thread(1);
995 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
996 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
997 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
998 
999 	/*
1000 	 * Send an I/O on thread 0, which is where the QoS thread is running.
1001 	 */
1002 	set_thread(0);
1003 	status = SPDK_BDEV_IO_STATUS_PENDING;
1004 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1005 	CU_ASSERT(rc == 0);
1006 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1007 	poll_threads();
1008 	stub_complete_io(g_bdev.io_target, 0);
1009 	poll_threads();
1010 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1011 
1012 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1013 	status = SPDK_BDEV_IO_STATUS_PENDING;
1014 	set_thread(1);
1015 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1016 	CU_ASSERT(rc == 0);
1017 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1018 	poll_threads();
1019 	/* Complete I/O on thread 0. This should not complete the I/O we submitted. */
1020 	set_thread(0);
1021 	stub_complete_io(g_bdev.io_target, 0);
1022 	poll_threads();
1023 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1024 	/* Now complete I/O on original thread 1. */
1025 	set_thread(1);
1026 	poll_threads();
1027 	stub_complete_io(g_bdev.io_target, 0);
1028 	poll_threads();
1029 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1030 
1031 	/* Reset rate limit for the next test cases. */
1032 	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
1033 	poll_threads();
1034 
1035 	/*
1036 	 * Test abort request when QoS is enabled.
1037 	 */
1038 
1039 	/* Send an I/O on thread 0, which is where the QoS thread is running. */
1040 	set_thread(0);
1041 	status = SPDK_BDEV_IO_STATUS_PENDING;
1042 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1043 	CU_ASSERT(rc == 0);
1044 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1045 	/* Send an abort to the I/O on the same thread. */
1046 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1047 	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
1048 	CU_ASSERT(rc == 0);
1049 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1050 	poll_threads();
1051 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1052 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1053 
1054 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1055 	status = SPDK_BDEV_IO_STATUS_PENDING;
1056 	set_thread(1);
1057 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1058 	CU_ASSERT(rc == 0);
1059 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1060 	poll_threads();
1061 	/* Send an abort to the I/O on the same thread. */
1062 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1063 	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
1064 	CU_ASSERT(rc == 0);
1065 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1066 	poll_threads();
1067 	/* The I/O should be aborted and the abort itself should succeed on thread 1. */
1068 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1069 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1070 
1071 	set_thread(0);
1072 
1073 	/*
1074 	 * Close the descriptor only, which should stop the qos channel since
1075 	 * the last descriptor was removed.
1076 	 */
1077 	spdk_bdev_close(g_desc);
1078 	poll_threads();
1079 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1080 
1081 	/*
1082 	 * Open the bdev again, which should set up the qos channel since the
1083 	 * I/O channels are still valid.
1084 	 */
1085 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1086 	poll_threads();
1087 	CU_ASSERT(bdev->internal.qos->ch != NULL);
1088 
1089 	/* Tear down the channels */
1090 	set_thread(0);
1091 	spdk_put_io_channel(io_ch[0]);
1092 	set_thread(1);
1093 	spdk_put_io_channel(io_ch[1]);
1094 	poll_threads();
1095 	set_thread(0);
1096 
1097 	/* Close the descriptor, which should stop the qos channel */
1098 	spdk_bdev_close(g_desc);
1099 	poll_threads();
1100 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1101 
1102 	/* Open the bdev again; the qos channel is not set up since there are no valid channels. */
1103 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1104 	poll_threads();
1105 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1106 
1107 	/* Create the channels in reverse order. */
1108 	set_thread(1);
1109 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1110 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1111 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1112 
1113 	set_thread(0);
1114 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1115 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1116 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1117 
1118 	/* Confirm that the qos thread is now thread 1 */
1119 	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
1120 
1121 	/* Tear down the channels */
1122 	set_thread(0);
1123 	spdk_put_io_channel(io_ch[0]);
1124 	set_thread(1);
1125 	spdk_put_io_channel(io_ch[1]);
1126 	poll_threads();
1127 
1128 	set_thread(0);
1129 
1130 	teardown_test();
1131 }
1132 
1133 static void
1134 io_during_qos_queue(void)
1135 {
1136 	struct spdk_io_channel *io_ch[2];
1137 	struct spdk_bdev_channel *bdev_ch[2];
1138 	struct spdk_bdev *bdev;
1139 	enum spdk_bdev_io_status status0, status1, status2;
1140 	int rc;
1141 
1142 	setup_test();
1143 	MOCK_SET(spdk_get_ticks, 0);
1144 
1145 	/* Enable QoS */
1146 	bdev = &g_bdev.bdev;
1147 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1148 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1149 	TAILQ_INIT(&bdev->internal.qos->queued);
1150 	/*
1151 	 * Enable read/write IOPS, read only byte per sec, write only
1152 	 * byte per sec and read/write byte per sec rate limits.
1153 	 * In this case, both read only and write only byte per sec
1154 	 * rate limit will take effect.
1155 	 * rate limits will take effect.
1156 	/* 4000 read/write I/O per second, or 4 per millisecond */
1157 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
1158 	/* 8K byte per millisecond with 4K block size */
1159 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
1160 	/* 4K byte per millisecond with 4K block size */
1161 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
1162 	/* 4K byte per millisecond with 4K block size */
1163 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
1164 
1165 	g_get_io_channel = true;
1166 
1167 	/* Create channels */
1168 	set_thread(0);
1169 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1170 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1171 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1172 
1173 	set_thread(1);
1174 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1175 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1176 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1177 
1178 	/* Send two read I/Os */
1179 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1180 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1181 	CU_ASSERT(rc == 0);
1182 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1183 	set_thread(0);
1184 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1185 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1186 	CU_ASSERT(rc == 0);
1187 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1188 	/* Send one write I/O */
1189 	status2 = SPDK_BDEV_IO_STATUS_PENDING;
1190 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
1191 	CU_ASSERT(rc == 0);
1192 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
1193 
1194 	/* Complete any I/O that arrived at the disk */
1195 	poll_threads();
1196 	set_thread(1);
1197 	stub_complete_io(g_bdev.io_target, 0);
1198 	set_thread(0);
1199 	stub_complete_io(g_bdev.io_target, 0);
1200 	poll_threads();
1201 
1202 	/* Only one of the two read I/Os should complete. (logical XOR) */
1203 	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
1204 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1205 	} else {
1206 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1207 	}
1208 	/* The write I/O should complete. */
1209 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
1210 
1211 	/* Advance in time by a millisecond */
1212 	spdk_delay_us(1000);
1213 
1214 	/* Complete more I/O */
1215 	poll_threads();
1216 	set_thread(1);
1217 	stub_complete_io(g_bdev.io_target, 0);
1218 	set_thread(0);
1219 	stub_complete_io(g_bdev.io_target, 0);
1220 	poll_threads();
1221 
1222 	/* Now the second read I/O should be done */
1223 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
1224 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1225 
1226 	/* Tear down the channels */
1227 	set_thread(1);
1228 	spdk_put_io_channel(io_ch[1]);
1229 	set_thread(0);
1230 	spdk_put_io_channel(io_ch[0]);
1231 	poll_threads();
1232 
1233 	teardown_test();
1234 }
1235 
1236 static void
1237 io_during_qos_reset(void)
1238 {
1239 	struct spdk_io_channel *io_ch[2];
1240 	struct spdk_bdev_channel *bdev_ch[2];
1241 	struct spdk_bdev *bdev;
1242 	enum spdk_bdev_io_status status0, status1, reset_status;
1243 	int rc;
1244 
1245 	setup_test();
1246 	MOCK_SET(spdk_get_ticks, 0);
1247 
1248 	/* Enable QoS */
1249 	bdev = &g_bdev.bdev;
1250 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
1251 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1252 	TAILQ_INIT(&bdev->internal.qos->queued);
1253 	/*
1254 	 * Enable read/write IOPS, write only byte per sec and
1255 	 * read/write byte per second rate limits.
1256 	 * In this case, read/write byte per second rate limit will
1257 	 * take effect first.
1258 	 */
1259 	/* 2000 read/write I/O per second, or 2 per millisecond */
1260 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
1261 	/* 4K byte per millisecond with 4K block size */
1262 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
1263 	/* 8K byte per millisecond with 4K block size */
1264 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
1265 
1266 	g_get_io_channel = true;
1267 
1268 	/* Create channels */
1269 	set_thread(0);
1270 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1271 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1272 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1273 
1274 	set_thread(1);
1275 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1276 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1277 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1278 
1279 	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
1280 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1281 	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1282 	CU_ASSERT(rc == 0);
1283 	set_thread(0);
1284 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1285 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1286 	CU_ASSERT(rc == 0);
1287 
1288 	poll_threads();
1289 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1290 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1291 
1292 	/* Reset the bdev. */
1293 	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
1294 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
1295 	CU_ASSERT(rc == 0);
1296 
1297 	/* Complete any I/O that arrived at the disk */
1298 	poll_threads();
1299 	set_thread(1);
1300 	stub_complete_io(g_bdev.io_target, 0);
1301 	set_thread(0);
1302 	stub_complete_io(g_bdev.io_target, 0);
1303 	poll_threads();
1304 
1305 	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1306 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
1307 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
1308 
1309 	/* Tear down the channels */
1310 	set_thread(1);
1311 	spdk_put_io_channel(io_ch[1]);
1312 	set_thread(0);
1313 	spdk_put_io_channel(io_ch[0]);
1314 	poll_threads();
1315 
1316 	teardown_test();
1317 }
1318 
1319 static void
1320 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1321 {
1322 	enum spdk_bdev_io_status *status = cb_arg;
1323 
1324 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
1325 	spdk_bdev_free_io(bdev_io);
1326 }
1327 
1328 static void
1329 enomem(void)
1330 {
1331 	struct spdk_io_channel *io_ch;
1332 	struct spdk_bdev_channel *bdev_ch;
1333 	struct spdk_bdev_shared_resource *shared_resource;
1334 	struct ut_bdev_channel *ut_ch;
1335 	const uint32_t IO_ARRAY_SIZE = 64;
1336 	const uint32_t AVAIL = 20;
1337 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
1338 	uint32_t nomem_cnt, i;
1339 	struct spdk_bdev_io *first_io;
1340 	int rc;
1341 
1342 	setup_test();
1343 
1344 	set_thread(0);
1345 	io_ch = spdk_bdev_get_io_channel(g_desc);
1346 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1347 	shared_resource = bdev_ch->shared_resource;
1348 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1349 	ut_ch->avail_cnt = AVAIL;
1350 
1351 	/* First submit a number of IOs equal to what the channel can support. */
1352 	for (i = 0; i < AVAIL; i++) {
1353 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1354 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1355 		CU_ASSERT(rc == 0);
1356 	}
1357 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1358 
1359 	/*
1360 	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
1361 	 *  the enomem_io list.
1362 	 *  the nomem_io list.
1363 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1364 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1365 	CU_ASSERT(rc == 0);
1366 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1367 	first_io = TAILQ_FIRST(&shared_resource->nomem_io);
1368 
1369 	/*
1370 	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
1371 	 *  the first_io above.
1372 	 */
1373 	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
1374 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1375 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1376 		CU_ASSERT(rc == 0);
1377 	}
1378 
1379 	/* Assert that first_io is still at the head of the list. */
1380 	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
1381 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
1382 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1383 	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
1384 
1385 	/*
1386 	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
1387 	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
1388 	 *  list.
1389 	 */
1390 	stub_complete_io(g_bdev.io_target, 1);
1391 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1392 
1393 	/*
1394 	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
1395 	 *  and we should see I/O get resubmitted to the test bdev module.
1396 	 */
1397 	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
1398 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
1399 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1400 
1401 	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
1402 	stub_complete_io(g_bdev.io_target, 1);
1403 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1404 
1405 	/*
1406 	 * Send a reset and confirm that all I/O are completed, including the ones that
1407 	 *  were queued on the nomem_io list.
1408 	 */
1409 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
1410 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
1411 	poll_threads();
1412 	CU_ASSERT(rc == 0);
1413 	/* This will complete the reset. */
1414 	stub_complete_io(g_bdev.io_target, 0);
1415 
1416 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
1417 	CU_ASSERT(shared_resource->io_outstanding == 0);
1418 
1419 	spdk_put_io_channel(io_ch);
1420 	poll_threads();
1421 	teardown_test();
1422 }
1423 
1424 static void
1425 enomem_multi_bdev(void)
1426 {
1427 	struct spdk_io_channel *io_ch;
1428 	struct spdk_bdev_channel *bdev_ch;
1429 	struct spdk_bdev_shared_resource *shared_resource;
1430 	struct ut_bdev_channel *ut_ch;
1431 	const uint32_t IO_ARRAY_SIZE = 64;
1432 	const uint32_t AVAIL = 20;
1433 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1434 	uint32_t i;
1435 	struct ut_bdev *second_bdev;
1436 	struct spdk_bdev_desc *second_desc = NULL;
1437 	struct spdk_bdev_channel *second_bdev_ch;
1438 	struct spdk_io_channel *second_ch;
1439 	int rc;
1440 
1441 	setup_test();
1442 
1443 	/* Register second bdev with the same io_target  */
1444 	second_bdev = calloc(1, sizeof(*second_bdev));
1445 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1446 	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
1447 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1448 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1449 
1450 	set_thread(0);
1451 	io_ch = spdk_bdev_get_io_channel(g_desc);
1452 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1453 	shared_resource = bdev_ch->shared_resource;
1454 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1455 	ut_ch->avail_cnt = AVAIL;
1456 
1457 	second_ch = spdk_bdev_get_io_channel(second_desc);
1458 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1459 	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
1460 
1461 	/* Saturate io_target through bdev A. */
1462 	for (i = 0; i < AVAIL; i++) {
1463 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1464 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1465 		CU_ASSERT(rc == 0);
1466 	}
1467 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1468 
1469 	/*
1470 	 * Now submit I/O through the second bdev. This should fail with ENOMEM
1471 	 * and then go onto the nomem_io list.
1472 	 */
1473 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1474 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1475 	CU_ASSERT(rc == 0);
1476 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1477 
1478 	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
1479 	stub_complete_io(g_bdev.io_target, AVAIL);
1480 
1481 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1482 	CU_ASSERT(shared_resource->io_outstanding == 1);
1483 
1484 	/* Now complete our retried I/O  */
1485 	stub_complete_io(g_bdev.io_target, 1);
1486 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1487 
1488 	spdk_put_io_channel(io_ch);
1489 	spdk_put_io_channel(second_ch);
1490 	spdk_bdev_close(second_desc);
1491 	unregister_bdev(second_bdev);
1492 	poll_threads();
1493 	free(second_bdev);
1494 	teardown_test();
1495 }
1496 
1497 static void
1498 enomem_multi_bdev_unregister(void)
1499 {
1500 	struct spdk_io_channel *io_ch;
1501 	struct spdk_bdev_channel *bdev_ch;
1502 	struct spdk_bdev_shared_resource *shared_resource;
1503 	struct ut_bdev_channel *ut_ch;
1504 	const uint32_t IO_ARRAY_SIZE = 64;
1505 	const uint32_t AVAIL = 20;
1506 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1507 	uint32_t i;
1508 	int rc;
1509 
1510 	setup_test();
1511 
1512 	set_thread(0);
1513 	io_ch = spdk_bdev_get_io_channel(g_desc);
1514 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1515 	shared_resource = bdev_ch->shared_resource;
1516 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1517 	ut_ch->avail_cnt = AVAIL;
1518 
1519 	/* Saturate io_target through the bdev. */
1520 	for (i = 0; i < AVAIL; i++) {
1521 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1522 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1523 		CU_ASSERT(rc == 0);
1524 	}
1525 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1526 
1527 	/*
1528 	 * Now submit I/O through the bdev. This should fail with ENOMEM
1529 	 * and then go onto the nomem_io list.
1530 	 */
1531 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1532 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1533 	CU_ASSERT(rc == 0);
1534 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1535 
1536 	/* Unregister the bdev to abort the IOs from nomem_io queue. */
1537 	unregister_bdev(&g_bdev);
1538 	CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED);
1539 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1540 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL);
1541 
1542 	/* Complete the bdev's I/O. */
1543 	stub_complete_io(g_bdev.io_target, AVAIL);
1544 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1545 
1546 	spdk_put_io_channel(io_ch);
1547 	poll_threads();
1548 	teardown_test();
1549 }
1550 
1551 static void
1552 enomem_multi_io_target(void)
1553 {
1554 	struct spdk_io_channel *io_ch;
1555 	struct spdk_bdev_channel *bdev_ch;
1556 	struct ut_bdev_channel *ut_ch;
1557 	const uint32_t IO_ARRAY_SIZE = 64;
1558 	const uint32_t AVAIL = 20;
1559 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1560 	uint32_t i;
1561 	int new_io_device;
1562 	struct ut_bdev *second_bdev;
1563 	struct spdk_bdev_desc *second_desc = NULL;
1564 	struct spdk_bdev_channel *second_bdev_ch;
1565 	struct spdk_io_channel *second_ch;
1566 	int rc;
1567 
1568 	setup_test();
1569 
1570 	/* Create new io_target and a second bdev using it */
1571 	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1572 				sizeof(struct ut_bdev_channel), NULL);
1573 	second_bdev = calloc(1, sizeof(*second_bdev));
1574 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1575 	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
1576 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1577 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1578 
1579 	set_thread(0);
1580 	io_ch = spdk_bdev_get_io_channel(g_desc);
1581 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1582 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1583 	ut_ch->avail_cnt = AVAIL;
1584 
1585 	/* Different io_target should imply a different shared_resource */
1586 	second_ch = spdk_bdev_get_io_channel(second_desc);
1587 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1588 	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1589 
1590 	/* Saturate io_target through bdev A. */
1591 	for (i = 0; i < AVAIL; i++) {
1592 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1593 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1594 		CU_ASSERT(rc == 0);
1595 	}
1596 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1597 
1598 	/* Issue one more I/O to fill ENOMEM list. */
1599 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1600 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1601 	CU_ASSERT(rc == 0);
1602 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1603 
1604 	/*
1605 	 * Now submit I/O through the second bdev. This should go through and complete
1606 	 * successfully because we're using a different io_device underneath.
1607 	 */
1608 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1609 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1610 	CU_ASSERT(rc == 0);
1611 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1612 	stub_complete_io(second_bdev->io_target, 1);
1613 
1614 	/* Cleanup; Complete outstanding I/O. */
1615 	stub_complete_io(g_bdev.io_target, AVAIL);
1616 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1617 	/* Complete the ENOMEM I/O */
1618 	stub_complete_io(g_bdev.io_target, 1);
1619 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1620 
1621 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1622 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1623 	spdk_put_io_channel(io_ch);
1624 	spdk_put_io_channel(second_ch);
1625 	spdk_bdev_close(second_desc);
1626 	unregister_bdev(second_bdev);
1627 	spdk_io_device_unregister(&new_io_device, NULL);
1628 	poll_threads();
1629 	free(second_bdev);
1630 	teardown_test();
1631 }
1632 
1633 static void
1634 qos_dynamic_enable_done(void *cb_arg, int status)
1635 {
1636 	int *rc = cb_arg;
1637 	*rc = status;
1638 }
1639 
1640 static void
1641 qos_dynamic_enable(void)
1642 {
1643 	struct spdk_io_channel *io_ch[2];
1644 	struct spdk_bdev_channel *bdev_ch[2];
1645 	struct spdk_bdev *bdev;
1646 	enum spdk_bdev_io_status bdev_io_status[2];
1647 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
1648 	int status, second_status, rc, i;
1649 
1650 	setup_test();
1651 	MOCK_SET(spdk_get_ticks, 0);
1652 
1653 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1654 		limits[i] = UINT64_MAX;
1655 	}
1656 
1657 	bdev = &g_bdev.bdev;
1658 
1659 	g_get_io_channel = true;
1660 
1661 	/* Create channels */
1662 	set_thread(0);
1663 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1664 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1665 	CU_ASSERT(bdev_ch[0]->flags == 0);
1666 
1667 	set_thread(1);
1668 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1669 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1670 	CU_ASSERT(bdev_ch[1]->flags == 0);
1671 
1672 	set_thread(0);
1673 
1674 	/*
1675 	 * Enable QoS: Read/Write IOPS, Read/Write byte,
1676 	 * Read only byte and Write only byte per second
1677 	 * rate limits.
1678 	 * More than 10 I/Os allowed per timeslice.
1679 	 */
1680 	status = -1;
1681 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1682 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
1683 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
1684 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
1685 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1686 	poll_threads();
1687 	CU_ASSERT(status == 0);
1688 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1689 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1690 
1691 	/*
1692 	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
1693 	 * Additional I/O will then be queued.
1694 	 */
1695 	set_thread(0);
1696 	for (i = 0; i < 10; i++) {
1697 		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1698 		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1699 		CU_ASSERT(rc == 0);
1700 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1701 		poll_thread(0);
1702 		stub_complete_io(g_bdev.io_target, 0);
1703 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1704 	}
1705 
	/*
	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has
	 * already been filled.  We want to test that when QoS is disabled, these two I/O:
	 *  1) are not aborted
	 *  2) are sent back to their original thread for resubmission
	 */
1712 	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1713 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1714 	CU_ASSERT(rc == 0);
1715 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1716 	set_thread(1);
1717 	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
1718 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
1719 	CU_ASSERT(rc == 0);
1720 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1721 	poll_threads();
1722 
1723 	/*
1724 	 * Disable QoS: Read/Write IOPS, Read/Write byte,
1725 	 * Read only byte rate limits
1726 	 */
1727 	status = -1;
1728 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1729 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
1730 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
1731 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1732 	poll_threads();
1733 	CU_ASSERT(status == 0);
1734 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1735 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1736 
1737 	/* Disable QoS: Write only Byte per second rate limit */
1738 	status = -1;
1739 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
1740 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1741 	poll_threads();
1742 	CU_ASSERT(status == 0);
1743 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1744 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1745 
1746 	/*
1747 	 * All I/O should have been resubmitted back on their original thread.  Complete
1748 	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
1749 	 */
1750 	set_thread(0);
1751 	stub_complete_io(g_bdev.io_target, 0);
1752 	poll_threads();
1753 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1754 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1755 
1756 	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
1757 	set_thread(1);
1758 	stub_complete_io(g_bdev.io_target, 0);
1759 	poll_threads();
1760 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
1761 
	/* Disable QoS again; it is already disabled, but the call should still succeed */
1763 	status = -1;
1764 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1765 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1766 	poll_threads();
1767 	CU_ASSERT(status == 0); /* This should succeed */
1768 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1769 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1770 
1771 	/* Enable QoS on thread 0 */
1772 	status = -1;
1773 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1774 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1775 	poll_threads();
1776 	CU_ASSERT(status == 0);
1777 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1778 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1779 
1780 	/* Disable QoS on thread 1 */
1781 	set_thread(1);
1782 	status = -1;
1783 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1784 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1785 	/* Don't poll yet. This should leave the channels with QoS enabled */
1786 	CU_ASSERT(status == -1);
1787 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1788 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1789 
1790 	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
1791 	second_status = 0;
1792 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
1793 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
1794 	poll_threads();
1795 	CU_ASSERT(status == 0); /* The disable should succeed */
1796 	CU_ASSERT(second_status < 0); /* The enable should fail */
1797 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1798 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1799 
1800 	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
1801 	status = -1;
1802 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1803 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1804 	poll_threads();
1805 	CU_ASSERT(status == 0);
1806 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1807 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1808 
1809 	/* Tear down the channels */
1810 	set_thread(0);
1811 	spdk_put_io_channel(io_ch[0]);
1812 	set_thread(1);
1813 	spdk_put_io_channel(io_ch[1]);
1814 	poll_threads();
1815 
1816 	set_thread(0);
1817 	teardown_test();
1818 }
1819 
1820 static void
1821 histogram_status_cb(void *cb_arg, int status)
1822 {
1823 	g_status = status;
1824 }
1825 
1826 static void
1827 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1828 {
1829 	g_status = status;
1830 	g_histogram = histogram;
1831 }
1832 
1833 static void
1834 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1835 		   uint64_t total, uint64_t so_far)
1836 {
1837 	g_count += count;
1838 }
1839 
1840 static void
1841 bdev_histograms_mt(void)
1842 {
1843 	struct spdk_io_channel *ch[2];
1844 	struct spdk_histogram_data *histogram;
1845 	uint8_t buf[4096];
1846 	int status = false;
1847 	int rc;
1848 
1850 	setup_test();
1851 
1852 	set_thread(0);
1853 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1854 	CU_ASSERT(ch[0] != NULL);
1855 
1856 	set_thread(1);
1857 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1858 	CU_ASSERT(ch[1] != NULL);
1859 
1861 	/* Enable histogram */
1862 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
1863 	poll_threads();
1864 	CU_ASSERT(g_status == 0);
1865 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1866 
1867 	/* Allocate histogram */
1868 	histogram = spdk_histogram_data_alloc();
1869 
1870 	/* Check if histogram is zeroed */
1871 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1872 	poll_threads();
1873 	CU_ASSERT(g_status == 0);
1874 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1875 
1876 	g_count = 0;
1877 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1878 
1879 	CU_ASSERT(g_count == 0);
1880 
1881 	set_thread(0);
1882 	rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
1883 	CU_ASSERT(rc == 0);
1884 
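	/* Let a little time pass before completing so the I/O has a nonzero latency for the histogram to record. */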
1885 	spdk_delay_us(10);
1886 	stub_complete_io(g_bdev.io_target, 1);
1887 	poll_threads();
1888 	CU_ASSERT(status == true);
1889 
1891 	set_thread(1);
1892 	rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
1893 	CU_ASSERT(rc == 0);
1894 
1895 	spdk_delay_us(10);
1896 	stub_complete_io(g_bdev.io_target, 1);
1897 	poll_threads();
1898 	CU_ASSERT(status == true);
1899 
1900 	set_thread(0);
1901 
1902 	/* Check if histogram gathered data from all I/O channels */
1903 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1904 	poll_threads();
1905 	CU_ASSERT(g_status == 0);
1906 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1907 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1908 
1909 	g_count = 0;
1910 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1911 	CU_ASSERT(g_count == 2);
1912 
1913 	/* Disable histogram */
1914 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1915 	poll_threads();
1916 	CU_ASSERT(g_status == 0);
1917 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1918 
1919 	spdk_histogram_data_free(histogram);
1920 
1921 	/* Tear down the channels */
1922 	set_thread(0);
1923 	spdk_put_io_channel(ch[0]);
1924 	set_thread(1);
1925 	spdk_put_io_channel(ch[1]);
1926 	poll_threads();
1927 	set_thread(0);
1928 	teardown_test();
1930 }
1931 
1932 struct timeout_io_cb_arg {
1933 	struct iovec iov;
1934 	uint8_t type;
1935 };
1936 
1937 static int
1938 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
1939 {
1940 	struct spdk_bdev_io *bdev_io;
1941 	int n = 0;
1942 
1943 	if (!ch) {
1944 		return -1;
1945 	}
1946 
1947 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
1948 		n++;
1949 	}
1950 
1951 	return n;
1952 }
1953 
1954 static void
1955 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
1956 {
1957 	struct timeout_io_cb_arg *ctx = cb_arg;
1958 
1959 	ctx->type = bdev_io->type;
1960 	ctx->iov.iov_base = bdev_io->iov.iov_base;
1961 	ctx->iov.iov_len = bdev_io->iov.iov_len;
1962 }
1963 
1964 static bool g_io_done;
1965 
1966 static void
1967 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1968 {
1969 	g_io_done = true;
1970 	spdk_bdev_free_io(bdev_io);
1971 }
1972 
1973 static void
1974 bdev_set_io_timeout_mt(void)
1975 {
1976 	struct spdk_io_channel *ch[3];
1977 	struct spdk_bdev_channel *bdev_ch[3];
1978 	struct timeout_io_cb_arg cb_arg;
1979 
1980 	setup_test();
1981 
1982 	g_bdev.bdev.optimal_io_boundary = 16;
1983 	g_bdev.bdev.split_on_optimal_io_boundary = true;
1984 
1985 	set_thread(0);
1986 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1987 	CU_ASSERT(ch[0] != NULL);
1988 
1989 	set_thread(1);
1990 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1991 	CU_ASSERT(ch[1] != NULL);
1992 
1993 	set_thread(2);
1994 	ch[2] = spdk_bdev_get_io_channel(g_desc);
1995 	CU_ASSERT(ch[2] != NULL);
1996 
	/* Multi-thread mode
	 * 1. Check that the timeout poller was registered successfully.
	 * 2. Check the timed-out I/O and ensure it is the I/O that was submitted by the user.
	 * 3. Check that the io_submitted list linkage in the bdev channel works correctly.
	 * 4. Close the desc and put the io channel while the timeout poller is polling.
	 */
2003 
	/* Set the timeout from the thread that opened the desc (thread 0) */
2005 	set_thread(0);
2006 	CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2007 	CU_ASSERT(g_desc->io_timeout_poller != NULL);
2008 	CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
2009 	CU_ASSERT(g_desc->cb_arg == &cb_arg);
2010 
	/* Check the io_submitted list and the timeout handler */
2012 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
2013 	bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
2014 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
2015 
2016 	set_thread(1);
2017 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2018 	bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
2019 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
2020 
	/* Now test that a single-vector command is split correctly.
	 * Offset 14, length 8, payload 0xF000
	 *  Child - Offset 14, length 2, payload 0xF000
	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
	 *
	 * The parent I/O and its two children account for the three entries expected
	 * in the io_submitted list below.
	 */
2028 	set_thread(2);
2029 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
2030 	bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
2031 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
2032 
2033 	set_thread(0);
2034 	memset(&cb_arg, 0, sizeof(cb_arg));
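	/* Advance time by only 3 seconds, less than the 5 second timeout, so nothing should time out yet. */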
2035 	spdk_delay_us(3 * spdk_get_ticks_hz());
2036 	poll_threads();
2037 	CU_ASSERT(cb_arg.type == 0);
2038 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2039 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2040 
	/* Now the total elapsed time (6 seconds) exceeds the 5 second timeout */
2042 	spdk_delay_us(3 * spdk_get_ticks_hz());
2043 	poll_thread(0);
2044 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2045 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2046 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2047 	stub_complete_io(g_bdev.io_target, 1);
2048 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
2049 
2050 	memset(&cb_arg, 0, sizeof(cb_arg));
2051 	set_thread(1);
2052 	poll_thread(1);
2053 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2054 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
2055 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2056 	stub_complete_io(g_bdev.io_target, 1);
2057 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
2058 
2059 	memset(&cb_arg, 0, sizeof(cb_arg));
2060 	set_thread(2);
2061 	poll_thread(2);
2062 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2063 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
2064 	CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
2065 	stub_complete_io(g_bdev.io_target, 1);
2066 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
2067 	stub_complete_io(g_bdev.io_target, 1);
2068 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
2069 
	/* Poll thread 0 so the timeout poller finishes its pass and drops its reference on the desc */
2071 	set_thread(0);
2072 	poll_thread(0);
2073 	CU_ASSERT(g_desc->refs == 0);
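	/* Queue one more I/O on each thread; these are the candidates for the next timeout pass. */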
2074 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2075 	set_thread(1);
2076 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
2077 	set_thread(2);
2078 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
2079 
	/* Trigger the timeout poller to run again; desc->refs is incremented while it runs.
	 * On thread 0 we destroy the io channel before the timeout poller runs, so the
	 * timeout callback is not called on thread 0.
	 */
2084 	spdk_delay_us(6 * spdk_get_ticks_hz());
2085 	memset(&cb_arg, 0, sizeof(cb_arg));
2086 	set_thread(0);
2087 	stub_complete_io(g_bdev.io_target, 1);
2088 	spdk_put_io_channel(ch[0]);
2089 	poll_thread(0);
	CU_ASSERT(g_desc->refs == 1);
2091 	CU_ASSERT(cb_arg.type == 0);
2092 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2093 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2094 
	/* On thread 1 the timeout poller runs first, then we destroy the io channel.
	 * The timeout callback is called on thread 1.
	 */
2098 	memset(&cb_arg, 0, sizeof(cb_arg));
2099 	set_thread(1);
2100 	poll_thread(1);
2101 	stub_complete_io(g_bdev.io_target, 1);
2102 	spdk_put_io_channel(ch[1]);
2103 	poll_thread(1);
2104 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2105 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2106 	CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
2107 
	/* Close the desc.
	 * This unregisters the timeout poller first, then decrements desc->refs;
	 * refs is not zero yet, so the desc is not freed.
	 */
2112 	set_thread(0);
2113 	spdk_bdev_close(g_desc);
2114 	CU_ASSERT(g_desc->refs == 1);
2115 	CU_ASSERT(g_desc->io_timeout_poller == NULL);
2116 
	/* The timeout poller runs on thread 2, then we destroy the io channel.
	 * The desc is already closed, so the poller exits immediately and the
	 * timeout callback is not called on thread 2.
	 */
2121 	memset(&cb_arg, 0, sizeof(cb_arg));
2122 	set_thread(2);
2123 	poll_thread(2);
2124 	stub_complete_io(g_bdev.io_target, 1);
2125 	spdk_put_io_channel(ch[2]);
2126 	poll_thread(2);
2127 	CU_ASSERT(cb_arg.type == 0);
2128 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2129 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2130 
2131 	set_thread(0);
2132 	poll_thread(0);
2133 	g_teardown_done = false;
2134 	unregister_bdev(&g_bdev);
2135 	spdk_io_device_unregister(&g_io_device, NULL);
2136 	spdk_bdev_finish(finish_cb, NULL);
2137 	spdk_iobuf_finish(finish_cb, NULL);
2138 	poll_threads();
2139 	memset(&g_bdev, 0, sizeof(g_bdev));
2140 	CU_ASSERT(g_teardown_done == true);
2141 	g_teardown_done = false;
2142 	free_threads();
2143 	free_cores();
2144 }
2145 
2146 static bool g_io_done2;
2147 static bool g_lock_lba_range_done;
2148 static bool g_unlock_lba_range_done;
2149 
2150 static void
2151 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2152 {
2153 	g_io_done2 = true;
2154 	spdk_bdev_free_io(bdev_io);
2155 }
2156 
2157 static void
2158 lock_lba_range_done(void *ctx, int status)
2159 {
2160 	g_lock_lba_range_done = true;
2161 }
2162 
2163 static void
2164 unlock_lba_range_done(void *ctx, int status)
2165 {
2166 	g_unlock_lba_range_done = true;
2167 }
2168 
2169 static uint32_t
2170 stub_channel_outstanding_cnt(void *io_target)
2171 {
2172 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
2173 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
2174 	uint32_t outstanding_cnt;
2175 
2176 	outstanding_cnt = ch->outstanding_cnt;
2177 
2178 	spdk_put_io_channel(_ch);
2179 	return outstanding_cnt;
2180 }
2181 
2182 static void
2183 lock_lba_range_then_submit_io(void)
2184 {
2185 	struct spdk_bdev_desc *desc = NULL;
2186 	void *io_target;
2187 	struct spdk_io_channel *io_ch[3];
2188 	struct spdk_bdev_channel *bdev_ch[3];
2189 	struct lba_range *range;
2190 	char buf[4096];
2191 	int ctx0, ctx1, ctx2;
2192 	int rc;
2193 
2194 	setup_test();
2195 
2196 	io_target = g_bdev.io_target;
2197 	desc = g_desc;
2198 
2199 	set_thread(0);
2200 	io_ch[0] = spdk_bdev_get_io_channel(desc);
2201 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
2202 	CU_ASSERT(io_ch[0] != NULL);
2203 
2204 	set_thread(1);
2205 	io_ch[1] = spdk_bdev_get_io_channel(desc);
2206 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
2207 	CU_ASSERT(io_ch[1] != NULL);
2208 
2209 	set_thread(0);
2210 	g_lock_lba_range_done = false;
2211 	rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
2212 	CU_ASSERT(rc == 0);
2213 	poll_threads();
2214 
2215 	/* The lock should immediately become valid, since there are no outstanding
2216 	 * write I/O.
2217 	 */
2218 	CU_ASSERT(g_lock_lba_range_done == true);
2219 	range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
2220 	SPDK_CU_ASSERT_FATAL(range != NULL);
2221 	CU_ASSERT(range->offset == 20);
2222 	CU_ASSERT(range->length == 10);
2223 	CU_ASSERT(range->owner_ch == bdev_ch[0]);
2224 
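	/* Submit a read on the channel that owns the lock; it should execute immediately
	 * rather than being queued on io_locked.
	 */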
2225 	g_io_done = false;
2226 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2227 	rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2228 	CU_ASSERT(rc == 0);
2229 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2230 
2231 	stub_complete_io(io_target, 1);
2232 	poll_threads();
2233 	CU_ASSERT(g_io_done == true);
2234 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2235 
2236 	/* Try a write I/O.  This should actually be allowed to execute, since the channel
2237 	 * holding the lock is submitting the write I/O.
2238 	 */
2239 	g_io_done = false;
2240 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2241 	rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2242 	CU_ASSERT(rc == 0);
2243 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2244 
2245 	stub_complete_io(io_target, 1);
2246 	poll_threads();
2247 	CU_ASSERT(g_io_done == true);
2248 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2249 
2250 	/* Try a write I/O.  This should get queued in the io_locked tailq. */
2251 	set_thread(1);
2252 	g_io_done = false;
2253 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2254 	rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
2255 	CU_ASSERT(rc == 0);
2256 	poll_threads();
2257 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2258 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2259 	CU_ASSERT(g_io_done == false);
2260 
2261 	/* Try to unlock the lba range using thread 1's io_ch.  This should fail. */
2262 	rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
2263 	CU_ASSERT(rc == -EINVAL);
2264 
2265 	/* Now create a new channel and submit a write I/O with it.  This should also be queued.
2266 	 * The new channel should inherit the active locks from the bdev's internal list.
2267 	 */
2268 	set_thread(2);
2269 	io_ch[2] = spdk_bdev_get_io_channel(desc);
2270 	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
2271 	CU_ASSERT(io_ch[2] != NULL);
2272 
2273 	g_io_done2 = false;
2274 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2275 	rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
2276 	CU_ASSERT(rc == 0);
2277 	poll_threads();
2278 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2279 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2280 	CU_ASSERT(g_io_done2 == false);
2281 
2282 	set_thread(0);
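	/* Unlock the range from the channel that owns it. */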
2283 	rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
2284 	CU_ASSERT(rc == 0);
2285 	poll_threads();
2286 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
2287 
2288 	/* The LBA range is unlocked, so the write IOs should now have started execution. */
2289 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2290 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2291 
2292 	set_thread(1);
2293 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2294 	stub_complete_io(io_target, 1);
2295 	set_thread(2);
2296 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2297 	stub_complete_io(io_target, 1);
2298 
2299 	poll_threads();
2300 	CU_ASSERT(g_io_done == true);
2301 	CU_ASSERT(g_io_done2 == true);
2302 
2303 	/* Tear down the channels */
2304 	set_thread(0);
2305 	spdk_put_io_channel(io_ch[0]);
2306 	set_thread(1);
2307 	spdk_put_io_channel(io_ch[1]);
2308 	set_thread(2);
2309 	spdk_put_io_channel(io_ch[2]);
2310 	poll_threads();
2311 	set_thread(0);
2312 	teardown_test();
2313 }
2314 
2315 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel().
2316  * spdk_bdev_unregister() calls spdk_io_device_unregister() in the end. However
2317  * spdk_io_device_unregister() fails if it is called while executing spdk_for_each_channel().
2318  * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset()
2319  * completes. Test this behavior.
2320  */
2321 static void
2322 unregister_during_reset(void)
2323 {
2324 	struct spdk_io_channel *io_ch[2];
2325 	bool done_reset = false, done_unregister = false;
2326 	int rc;
2327 
2328 	setup_test();
2329 	set_thread(0);
2330 
2331 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
2332 	SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL);
2333 
2334 	set_thread(1);
2335 
2336 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
2337 	SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL);
2338 
2339 	set_thread(0);
2340 
2341 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2342 
2343 	rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset);
2344 	CU_ASSERT(rc == 0);
2345 
2346 	set_thread(0);
2347 
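	/* Poll thread 0 just once so the reset has started its spdk_for_each_channel() pass
	 * but has not yet completed.
	 */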
2348 	poll_thread_times(0, 1);
2349 
2350 	spdk_bdev_close(g_desc);
2351 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister);
2352 
2353 	CU_ASSERT(done_reset == false);
2354 	CU_ASSERT(done_unregister == false);
2355 
2356 	poll_threads();
2357 
2358 	stub_complete_io(g_bdev.io_target, 0);
2359 
2360 	poll_threads();
2361 
2362 	CU_ASSERT(done_reset == true);
2363 	CU_ASSERT(done_unregister == false);
2364 
2365 	spdk_put_io_channel(io_ch[0]);
2366 
2367 	set_thread(1);
2368 
2369 	spdk_put_io_channel(io_ch[1]);
2370 
2371 	poll_threads();
2372 
2373 	CU_ASSERT(done_unregister == true);
2374 
2375 	/* Restore the original g_bdev so that we can use teardown_test(). */
2376 	set_thread(0);
2377 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
2378 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2379 	teardown_test();
2380 }
2381 
2382 static void
2383 bdev_init_wt_cb(void *done, int rc)
2384 {
2385 }
2386 
2387 static int
2388 wrong_thread_setup(void)
2389 {
2390 	allocate_cores(1);
2391 	allocate_threads(2);
2392 	set_thread(0);
2393 
2394 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
2395 				ut_accel_ch_destroy_cb, 0, NULL);
2396 	spdk_bdev_initialize(bdev_init_wt_cb, NULL);
2397 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
2398 				sizeof(struct ut_bdev_channel), NULL);
2399 
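	/* Leave the current thread set to 1 so each test in this suite starts on a non-app thread. */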
2400 	set_thread(1);
2401 
2402 	return 0;
2403 }
2404 
2405 static int
2406 wrong_thread_teardown(void)
2407 {
2408 	int rc = 0;
2409 
2410 	set_thread(0);
2411 
2412 	g_teardown_done = false;
2413 	spdk_io_device_unregister(&g_io_device, NULL);
2414 	spdk_bdev_finish(finish_cb, NULL);
2415 	poll_threads();
2416 	memset(&g_bdev, 0, sizeof(g_bdev));
2417 	if (!g_teardown_done) {
2418 		fprintf(stderr, "%s:%d %s: teardown not done\n", __FILE__, __LINE__, __func__);
2419 		rc = -1;
2420 	}
2421 	g_teardown_done = false;
2422 
2423 	spdk_io_device_unregister(&g_accel_io_device, NULL);
2424 	free_threads();
2425 	free_cores();
2426 
2427 	return rc;
2428 }
2429 
2430 static void
2431 _bdev_unregistered_wt(void *ctx, int rc)
2432 {
2433 	struct spdk_thread **threadp = ctx;
2434 
2435 	*threadp = spdk_get_thread();
2436 }
2437 
2438 static void
2439 spdk_bdev_register_wt(void)
2440 {
2441 	struct spdk_bdev bdev = { 0 };
2442 	int rc;
2443 	struct spdk_thread *unreg_thread;
2444 
2445 	bdev.name = "wt_bdev";
2446 	bdev.fn_table = &fn_table;
2447 	bdev.module = &bdev_ut_if;
2448 	bdev.blocklen = 4096;
2449 	bdev.blockcnt = 1024;
2450 
2451 	/* Can register only on app thread */
2452 	rc = spdk_bdev_register(&bdev);
2453 	CU_ASSERT(rc == -EINVAL);
2454 
2455 	/* Can unregister on any thread */
2456 	set_thread(0);
2457 	rc = spdk_bdev_register(&bdev);
2458 	CU_ASSERT(rc == 0);
2459 	set_thread(1);
2460 	unreg_thread = NULL;
2461 	spdk_bdev_unregister(&bdev, _bdev_unregistered_wt, &unreg_thread);
2462 	poll_threads();
2463 	CU_ASSERT(unreg_thread == spdk_get_thread());
2464 
2465 	/* Can unregister by name on any thread */
2466 	set_thread(0);
2467 	rc = spdk_bdev_register(&bdev);
2468 	CU_ASSERT(rc == 0);
2469 	set_thread(1);
2470 	unreg_thread = NULL;
2471 	rc = spdk_bdev_unregister_by_name(bdev.name, bdev.module, _bdev_unregistered_wt,
2472 					  &unreg_thread);
2473 	CU_ASSERT(rc == 0);
2474 	poll_threads();
2475 	CU_ASSERT(unreg_thread == spdk_get_thread());
2476 }
2477 
2478 static void
2479 wait_for_examine_cb(void *arg)
2480 {
2481 	struct spdk_thread **thread = arg;
2482 
2483 	*thread = spdk_get_thread();
2484 }
2485 
2486 static void
2487 spdk_bdev_examine_wt(void)
2488 {
2489 	int rc;
2490 	bool save_auto_examine = g_bdev_opts.bdev_auto_examine;
2491 	struct spdk_thread *thread;
2492 
2493 	g_bdev_opts.bdev_auto_examine = false;
2494 
2495 	set_thread(0);
2496 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2497 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2498 	set_thread(1);
2499 
2500 	/* Can examine only on the app thread */
2501 	rc = spdk_bdev_examine("ut_bdev_wt");
2502 	CU_ASSERT(rc == -EINVAL);
2503 	unregister_bdev(&g_bdev);
2504 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2505 
2506 	/* Can wait for examine on app thread, callback called on app thread. */
2507 	set_thread(0);
2508 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2509 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2510 	thread = NULL;
2511 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2512 	CU_ASSERT(rc == 0);
2513 	poll_threads();
2514 	CU_ASSERT(thread == spdk_get_thread());
2515 	unregister_bdev(&g_bdev);
2516 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2517 
2518 	/* Can wait for examine on non-app thread, callback called on same thread. */
2519 	set_thread(0);
2520 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2521 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2522 	thread = NULL;
2523 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2524 	CU_ASSERT(rc == 0);
2525 	poll_threads();
2526 	CU_ASSERT(thread == spdk_get_thread());
2527 	unregister_bdev(&g_bdev);
2528 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2529 
2530 	unregister_bdev(&g_bdev);
2531 	g_bdev_opts.bdev_auto_examine = save_auto_examine;
2532 }
2533 
2534 static void
2535 event_notify_and_close(void)
2536 {
2537 	int resize_notify_count = 0;
2538 	struct spdk_bdev_desc *desc = NULL;
2539 	struct spdk_bdev *bdev;
2540 	int rc;
2541 
2542 	setup_test();
2543 	set_thread(0);
2544 
2545 	/* setup_test() automatically opens the bdev, but this test needs to do
2546 	 * that in a different way. */
2547 	spdk_bdev_close(g_desc);
2548 	poll_threads();
2549 
2550 	set_thread(1);
2551 
2552 	rc = spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &resize_notify_count, &desc);
2553 	CU_ASSERT(rc == 0);
2554 	SPDK_CU_ASSERT_FATAL(desc != NULL);
2555 
2556 	bdev = spdk_bdev_desc_get_bdev(desc);
2557 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
2558 
	/* Test the normal case where a resize event is notified. */
2560 	set_thread(0);
2561 
2562 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 2);
2563 	CU_ASSERT(rc == 0);
2564 	CU_ASSERT(bdev->blockcnt == 1024 * 2);
2565 	CU_ASSERT(desc->refs == 1);
2566 	CU_ASSERT(resize_notify_count == 0);
2567 
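	/* Polling delivers the deferred event_notify message on the desc's thread, invoking the
	 * callback and dropping the extra desc reference.
	 */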
2568 	poll_threads();
2569 
2570 	CU_ASSERT(desc->refs == 0);
2571 	CU_ASSERT(resize_notify_count == 1);
2572 
	/* Test a more complex case: if the bdev is closed after two event_notify messages
	 * have been sent, both messages are discarded and the desc is freed.
	 */
2576 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 3);
2577 	CU_ASSERT(rc == 0);
2578 	CU_ASSERT(bdev->blockcnt == 1024 * 3);
2579 	CU_ASSERT(desc->refs == 1);
2580 	CU_ASSERT(resize_notify_count == 1);
2581 
2582 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 4);
2583 	CU_ASSERT(rc == 0);
2584 	CU_ASSERT(bdev->blockcnt == 1024 * 4);
2585 	CU_ASSERT(desc->refs == 2);
2586 	CU_ASSERT(resize_notify_count == 1);
2587 
2588 	set_thread(1);
2589 
2590 	spdk_bdev_close(desc);
2591 	CU_ASSERT(desc->closed == true);
2592 	CU_ASSERT(desc->refs == 2);
2593 	CU_ASSERT(resize_notify_count == 1);
2594 
2595 	poll_threads();
2596 
2597 	CU_ASSERT(resize_notify_count == 1);
2598 
2599 	set_thread(0);
2600 
2601 	/* Restore g_desc. Then, we can execute teardown_test(). */
2602 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2603 	teardown_test();
2604 }
2605 
2606 int
2607 main(int argc, char **argv)
2608 {
2609 	CU_pSuite	suite = NULL;
2610 	CU_pSuite	suite_wt = NULL;
2611 	unsigned int	num_failures;
2612 
2613 	CU_set_error_action(CUEA_ABORT);
2614 	CU_initialize_registry();
2615 
2616 	suite = CU_add_suite("bdev", NULL, NULL);
2617 	suite_wt = CU_add_suite("bdev_wrong_thread", wrong_thread_setup, wrong_thread_teardown);
2618 
2619 	CU_ADD_TEST(suite, basic);
2620 	CU_ADD_TEST(suite, unregister_and_close);
2621 	CU_ADD_TEST(suite, unregister_and_close_different_threads);
2622 	CU_ADD_TEST(suite, basic_qos);
2623 	CU_ADD_TEST(suite, put_channel_during_reset);
2624 	CU_ADD_TEST(suite, aborted_reset);
2625 	CU_ADD_TEST(suite, aborted_reset_no_outstanding_io);
2626 	CU_ADD_TEST(suite, io_during_reset);
2627 	CU_ADD_TEST(suite, reset_completions);
2628 	CU_ADD_TEST(suite, io_during_qos_queue);
2629 	CU_ADD_TEST(suite, io_during_qos_reset);
2630 	CU_ADD_TEST(suite, enomem);
2631 	CU_ADD_TEST(suite, enomem_multi_bdev);
2632 	CU_ADD_TEST(suite, enomem_multi_bdev_unregister);
2633 	CU_ADD_TEST(suite, enomem_multi_io_target);
2634 	CU_ADD_TEST(suite, qos_dynamic_enable);
2635 	CU_ADD_TEST(suite, bdev_histograms_mt);
2636 	CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
2637 	CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
2638 	CU_ADD_TEST(suite, unregister_during_reset);
2639 	CU_ADD_TEST(suite_wt, spdk_bdev_register_wt);
2640 	CU_ADD_TEST(suite_wt, spdk_bdev_examine_wt);
2641 	CU_ADD_TEST(suite, event_notify_and_close);
2642 
2643 	CU_basic_set_mode(CU_BRM_VERBOSE);
2644 	CU_basic_run_tests();
2645 	num_failures = CU_get_number_of_failures();
2646 	CU_cleanup_registry();
2647 	return num_failures;
2648 }
2649