1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk_cunit.h"
7 
8 #include "common/lib/ut_multithread.c"
9 #include "unit/lib/json_mock.c"
10 
11 #include "spdk/config.h"
12 /* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
13 #undef SPDK_CONFIG_VTUNE
14 
15 #include "bdev/bdev.c"
16 
17 #define BDEV_UT_NUM_THREADS 3
18 
19 DEFINE_STUB(spdk_notify_send, uint64_t, (const char *type, const char *ctx), 0);
20 DEFINE_STUB(spdk_notify_type_register, struct spdk_notify_type *, (const char *type), NULL);
21 DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io, int *sc, int *sk,
22 		int *asc, int *ascq));
23 DEFINE_STUB(spdk_memory_domain_get_dma_device_id, const char *, (struct spdk_memory_domain *domain),
24 	    "test_domain");
25 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
26 	    (struct spdk_memory_domain *domain), 0);
27 
28 DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
29 int
30 spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
31 			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
32 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
33 {
34 	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);
35 
36 	cpl_cb(cpl_cb_arg, 0);
37 	return 0;
38 }
39 
40 DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
41 int
42 spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
43 			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
44 			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
45 {
46 	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);
47 
48 	cpl_cb(cpl_cb_arg, 0);
49 	return 0;
50 }
51 
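/*
 * Test bdev wrapper used throughout these tests: an spdk_bdev plus the
 * io_target pointer that serves as the io_device for its channels.
 */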
52 struct ut_bdev {
53 	struct spdk_bdev	bdev;
54 	void			*io_target;
55 };
56 
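/*
 * Per-channel context for the stub bdev module: tracks queued I/O and how
 * many more submissions the channel will accept before reporting NOMEM.
 */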
57 struct ut_bdev_channel {
58 	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
59 	uint32_t			outstanding_cnt;
60 	uint32_t			avail_cnt;
61 };
62 
63 int g_io_device;
64 struct ut_bdev g_bdev;
65 struct spdk_bdev_desc *g_desc;
66 bool g_teardown_done = false;
67 bool g_get_io_channel = true;
68 bool g_create_ch = true;
69 bool g_init_complete_called = false;
70 bool g_fini_start_called = true;
71 int g_status = 0;
72 int g_count = 0;
73 struct spdk_histogram_data *g_histogram = NULL;
74 
75 static int
76 stub_create_ch(void *io_device, void *ctx_buf)
77 {
78 	struct ut_bdev_channel *ch = ctx_buf;
79 
80 	if (g_create_ch == false) {
81 		return -1;
82 	}
83 
84 	TAILQ_INIT(&ch->outstanding_io);
85 	ch->outstanding_cnt = 0;
86 	/*
87 	 * When avail gets to 0, the submit_request stub will complete new I/O with NOMEM.
88 	 *  Most tests do not want ENOMEM to occur, so by default set this to a
89 	 *  big value that won't get hit.  The ENOMEM tests can then override this
90 	 *  value to something much smaller to induce ENOMEM conditions.
91 	 */
92 	ch->avail_cnt = 2048;
93 	return 0;
94 }
95 
96 static void
97 stub_destroy_ch(void *io_device, void *ctx_buf)
98 {
99 }
100 
101 static struct spdk_io_channel *
102 stub_get_io_channel(void *ctx)
103 {
104 	struct ut_bdev *ut_bdev = ctx;
105 
106 	if (g_get_io_channel == true) {
107 		return spdk_get_io_channel(ut_bdev->io_target);
108 	} else {
109 		return NULL;
110 	}
111 }
112 
113 static int
114 stub_destruct(void *ctx)
115 {
116 	return 0;
117 }
118 
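/*
 * Stub submit path: a RESET aborts all I/O outstanding on the channel, an
 * ABORT completes the targeted I/O as aborted, and all other I/O is queued
 * on outstanding_io (or completed with NOMEM once avail_cnt is exhausted)
 * until stub_complete_io() is called.
 */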
119 static void
120 stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
121 {
122 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
123 	struct spdk_bdev_io *io;
124 
125 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
126 		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
127 			io = TAILQ_FIRST(&ch->outstanding_io);
128 			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
129 			ch->outstanding_cnt--;
130 			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
131 			ch->avail_cnt++;
132 		}
133 	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
134 		TAILQ_FOREACH(io, &ch->outstanding_io, module_link) {
135 			if (io == bdev_io->u.abort.bio_to_abort) {
136 				TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
137 				ch->outstanding_cnt--;
138 				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
139 				ch->avail_cnt++;
140 
141 				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
142 				return;
143 			}
144 		}
145 
146 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
147 		return;
148 	}
149 
150 	if (ch->avail_cnt > 0) {
151 		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
152 		ch->outstanding_cnt++;
153 		ch->avail_cnt--;
154 	} else {
155 		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
156 	}
157 }
158 
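/*
 * Complete up to num_to_complete I/O queued on the current thread's channel
 * with SUCCESS status; passing 0 completes everything that is outstanding.
 */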
159 static uint32_t
160 stub_complete_io(void *io_target, uint32_t num_to_complete)
161 {
162 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
163 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
164 	struct spdk_bdev_io *io;
165 	bool complete_all = (num_to_complete == 0);
166 	uint32_t num_completed = 0;
167 
168 	while (complete_all || num_completed < num_to_complete) {
169 		if (TAILQ_EMPTY(&ch->outstanding_io)) {
170 			break;
171 		}
172 		io = TAILQ_FIRST(&ch->outstanding_io);
173 		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
174 		ch->outstanding_cnt--;
175 		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
176 		ch->avail_cnt++;
177 		num_completed++;
178 	}
179 	spdk_put_io_channel(_ch);
180 	return num_completed;
181 }
182 
183 static bool
184 stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
185 {
186 	return true;
187 }
188 
189 static struct spdk_bdev_fn_table fn_table = {
190 	.get_io_channel =	stub_get_io_channel,
191 	.destruct =		stub_destruct,
192 	.submit_request =	stub_submit_request,
193 	.io_type_supported =	stub_io_type_supported,
194 };
195 
196 struct spdk_bdev_module bdev_ut_if;
197 
198 static int
199 module_init(void)
200 {
201 	spdk_bdev_module_init_done(&bdev_ut_if);
202 	return 0;
203 }
204 
205 static void
206 module_fini(void)
207 {
208 }
209 
210 static void
211 init_complete(void)
212 {
213 	g_init_complete_called = true;
214 }
215 
216 static void
217 fini_start(void)
218 {
219 	g_fini_start_called = true;
220 }
221 
222 struct spdk_bdev_module bdev_ut_if = {
223 	.name = "bdev_ut",
224 	.module_init = module_init,
225 	.module_fini = module_fini,
226 	.async_init = true,
227 	.init_complete = init_complete,
228 	.fini_start = fini_start,
229 };
230 
231 SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
232 
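/* Register a ut_bdev (4096-byte blocks, 1024 blocks) backed by the given io_target. */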
233 static void
234 register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
235 {
236 	memset(ut_bdev, 0, sizeof(*ut_bdev));
237 
238 	ut_bdev->io_target = io_target;
239 	ut_bdev->bdev.ctxt = ut_bdev;
240 	ut_bdev->bdev.name = name;
241 	ut_bdev->bdev.fn_table = &fn_table;
242 	ut_bdev->bdev.module = &bdev_ut_if;
243 	ut_bdev->bdev.blocklen = 4096;
244 	ut_bdev->bdev.blockcnt = 1024;
245 
246 	spdk_bdev_register(&ut_bdev->bdev);
247 }
248 
249 static void
250 unregister_bdev(struct ut_bdev *ut_bdev)
251 {
252 	/* Handle any deferred messages. */
253 	poll_threads();
254 	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
255 	/* Handle the async bdev unregister. */
256 	poll_threads();
257 }
258 
259 static void
260 bdev_init_cb(void *done, int rc)
261 {
262 	CU_ASSERT(rc == 0);
263 	*(bool *)done = true;
264 }
265 
266 static void
267 _bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
268 	       void *event_ctx)
269 {
270 	switch (type) {
271 	case SPDK_BDEV_EVENT_REMOVE:
272 		if (event_ctx != NULL) {
273 			*(bool *)event_ctx = true;
274 		}
275 		break;
276 	default:
277 		CU_ASSERT(false);
278 		break;
279 	}
280 }
281 
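/*
 * Common setup: allocate BDEV_UT_NUM_THREADS threads, initialize the bdev layer,
 * register the stub io_device and ut_bdev, and open a descriptor on thread 0.
 */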
282 static void
283 setup_test(void)
284 {
285 	bool done = false;
286 
287 	allocate_cores(BDEV_UT_NUM_THREADS);
288 	allocate_threads(BDEV_UT_NUM_THREADS);
289 	set_thread(0);
290 	spdk_bdev_initialize(bdev_init_cb, &done);
291 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
292 				sizeof(struct ut_bdev_channel), NULL);
293 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
294 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
295 }
296 
297 static void
298 finish_cb(void *cb_arg)
299 {
300 	g_teardown_done = true;
301 }
302 
303 static void
304 teardown_test(void)
305 {
306 	set_thread(0);
307 	g_teardown_done = false;
308 	spdk_bdev_close(g_desc);
309 	g_desc = NULL;
310 	unregister_bdev(&g_bdev);
311 	spdk_io_device_unregister(&g_io_device, NULL);
312 	spdk_bdev_finish(finish_cb, NULL);
313 	poll_threads();
314 	memset(&g_bdev, 0, sizeof(g_bdev));
315 	CU_ASSERT(g_teardown_done == true);
316 	g_teardown_done = false;
317 	free_threads();
318 	free_cores();
319 }
320 
321 static uint32_t
322 bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
323 {
324 	struct spdk_bdev_io *io;
325 	uint32_t cnt = 0;
326 
327 	TAILQ_FOREACH(io, tailq, internal.link) {
328 		cnt++;
329 	}
330 
331 	return cnt;
332 }
333 
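/*
 * Verify the module init_complete/fini_start callbacks and the failure paths
 * of spdk_bdev_get_io_channel().
 */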
334 static void
335 basic(void)
336 {
337 	g_init_complete_called = false;
338 	setup_test();
339 	CU_ASSERT(g_init_complete_called == true);
340 
341 	set_thread(0);
342 
343 	g_get_io_channel = false;
344 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
345 	CU_ASSERT(g_ut_threads[0].ch == NULL);
346 
347 	g_get_io_channel = true;
348 	g_create_ch = false;
349 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
350 	CU_ASSERT(g_ut_threads[0].ch == NULL);
351 
352 	g_get_io_channel = true;
353 	g_create_ch = true;
354 	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
355 	CU_ASSERT(g_ut_threads[0].ch != NULL);
356 	spdk_put_io_channel(g_ut_threads[0].ch);
357 
358 	g_fini_start_called = false;
359 	teardown_test();
360 	CU_ASSERT(g_fini_start_called == true);
361 }
362 
363 static void
364 _bdev_unregistered(void *done, int rc)
365 {
366 	CU_ASSERT(rc == 0);
367 	*(bool *)done = true;
368 }
369 
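/*
 * Verify that spdk_bdev_unregister() does not complete until the last open
 * descriptor is closed, both with and without a remove-notification context.
 */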
370 static void
371 unregister_and_close(void)
372 {
373 	bool done, remove_notify;
374 	struct spdk_bdev_desc *desc = NULL;
375 
376 	setup_test();
377 	set_thread(0);
378 
379 	/* setup_test() automatically opens the bdev,
380 	 * but this test needs to do that in a different
381 	 * way. */
382 	spdk_bdev_close(g_desc);
383 	poll_threads();
384 
385 	/* Try hotremoving a bdev with descriptors which don't provide
386 	 * any context to the notification callback */
387 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
388 	SPDK_CU_ASSERT_FATAL(desc != NULL);
389 
390 	/* There is an open descriptor on the device. Unregister it,
391 	 * which can't proceed until the descriptor is closed. */
392 	done = false;
393 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
394 
395 	/* Poll the threads to allow all events to be processed */
396 	poll_threads();
397 
398 	/* Make sure the bdev was not unregistered. We still have a
399 	 * descriptor open */
400 	CU_ASSERT(done == false);
401 
402 	spdk_bdev_close(desc);
403 	poll_threads();
404 	desc = NULL;
405 
406 	/* The unregister should have completed */
407 	CU_ASSERT(done == true);
408 
409 
410 	/* Register the bdev again */
411 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
412 
413 	remove_notify = false;
414 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
415 	SPDK_CU_ASSERT_FATAL(desc != NULL);
416 	CU_ASSERT(remove_notify == false);
417 
418 	/* There is an open descriptor on the device. Unregister it,
419 	 * which can't proceed until the descriptor is closed. */
420 	done = false;
421 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
422 	/* No polling has occurred, so neither of these should execute */
423 	CU_ASSERT(remove_notify == false);
424 	CU_ASSERT(done == false);
425 
426 	/* Prior to the unregister completing, close the descriptor */
427 	spdk_bdev_close(desc);
428 
429 	/* Poll the threads to allow all events to be processed */
430 	poll_threads();
431 
432 	/* Remove notify should not have been called because the
433 	 * descriptor is already closed. */
434 	CU_ASSERT(remove_notify == false);
435 
436 	/* The unregister should have completed */
437 	CU_ASSERT(done == true);
438 
439 	/* Restore the original g_bdev so that we can use teardown_test(). */
440 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
441 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
442 	teardown_test();
443 }
444 
445 static void
446 reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
447 {
448 	bool *done = cb_arg;
449 
450 	CU_ASSERT(success == true);
451 	*done = true;
452 	spdk_bdev_free_io(bdev_io);
453 }
454 
455 static void
456 put_channel_during_reset(void)
457 {
458 	struct spdk_io_channel *io_ch;
459 	bool done = false;
460 
461 	setup_test();
462 
463 	set_thread(0);
464 	io_ch = spdk_bdev_get_io_channel(g_desc);
465 	CU_ASSERT(io_ch != NULL);
466 
467 	/*
468 	 * Start a reset, but then put the I/O channel before
469 	 *  the deferred messages for the reset get a chance to
470 	 *  execute.
471 	 */
472 	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
473 	spdk_put_io_channel(io_ch);
474 	poll_threads();
475 	stub_complete_io(g_bdev.io_target, 0);
476 
477 	teardown_test();
478 }
479 
480 static void
481 aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
482 {
483 	enum spdk_bdev_io_status *status = cb_arg;
484 
485 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
486 	spdk_bdev_free_io(bdev_io);
487 }
488 
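/*
 * Verify that a reset queued behind an in-progress reset is failed when its
 * channel is destroyed, while the original reset still completes successfully.
 */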
489 static void
490 aborted_reset(void)
491 {
492 	struct spdk_io_channel *io_ch[2];
493 	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
494 				 status2 = SPDK_BDEV_IO_STATUS_PENDING;
495 
496 	setup_test();
497 
498 	set_thread(0);
499 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
500 	CU_ASSERT(io_ch[0] != NULL);
501 	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
502 	poll_threads();
503 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
504 
505 	/*
506 	 * First reset has been submitted on ch0.  Now submit a second
507 	 *  reset on ch1 which will get queued since there is already a
508 	 *  reset in progress.
509 	 */
510 	set_thread(1);
511 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
512 	CU_ASSERT(io_ch[1] != NULL);
513 	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
514 	poll_threads();
515 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
516 
517 	/*
518 	 * Now destroy ch1.  This will abort the queued reset.  Check that
519 	 *  the second reset was completed with failed status.  Also check
520 	 *  that bdev->internal.reset_in_progress != NULL, since the
521 	 *  original reset has not been completed yet.  This ensures that
522 	 *  the bdev code is correctly noticing that the failed reset is
523 	 *  *not* the one that had been submitted to the bdev module.
524 	 */
525 	set_thread(1);
526 	spdk_put_io_channel(io_ch[1]);
527 	poll_threads();
528 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
529 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
530 
531 	/*
532 	 * Now complete the first reset, verify that it completed with SUCCESS
533 	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
534 	 */
535 	set_thread(0);
536 	spdk_put_io_channel(io_ch[0]);
537 	stub_complete_io(g_bdev.io_target, 0);
538 	poll_threads();
539 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
540 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
541 
542 	teardown_test();
543 }
544 
545 static void
546 io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
547 {
548 	enum spdk_bdev_io_status *status = cb_arg;
549 
550 	*status = bdev_io->internal.status;
551 	spdk_bdev_free_io(bdev_io);
552 }
553 
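/*
 * Verify that I/O submitted while a reset is in progress is aborted by the
 * bdev layer, while I/O submitted with no reset pending completes normally.
 */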
554 static void
555 io_during_reset(void)
556 {
557 	struct spdk_io_channel *io_ch[2];
558 	struct spdk_bdev_channel *bdev_ch[2];
559 	enum spdk_bdev_io_status status0, status1, status_reset;
560 	int rc;
561 
562 	setup_test();
563 
564 	/*
565 	 * First test normal case - submit an I/O on each of two channels (with no resets)
566 	 *  and verify they complete successfully.
567 	 */
568 	set_thread(0);
569 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
570 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
571 	CU_ASSERT(bdev_ch[0]->flags == 0);
572 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
573 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
574 	CU_ASSERT(rc == 0);
575 
576 	set_thread(1);
577 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
578 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
579 	CU_ASSERT(bdev_ch[1]->flags == 0);
580 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
581 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
582 	CU_ASSERT(rc == 0);
583 
584 	poll_threads();
585 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
586 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
587 
588 	set_thread(0);
589 	stub_complete_io(g_bdev.io_target, 0);
590 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
591 
592 	set_thread(1);
593 	stub_complete_io(g_bdev.io_target, 0);
594 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
595 
596 	/*
597 	 * Now submit a reset, and leave it pending while we submit I/O on two different
598 	 *  channels.  These I/O should be aborted by the bdev layer since the reset is in
599 	 *  progress.
600 	 */
601 	set_thread(0);
602 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
603 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
604 	CU_ASSERT(rc == 0);
605 
606 	CU_ASSERT(bdev_ch[0]->flags == 0);
607 	CU_ASSERT(bdev_ch[1]->flags == 0);
608 	poll_threads();
609 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
610 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
611 
612 	set_thread(0);
613 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
614 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
615 	CU_ASSERT(rc == 0);
616 
617 	set_thread(1);
618 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
619 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
620 	CU_ASSERT(rc == 0);
621 
622 	/*
623 	 * A reset is in progress, so these read I/Os should complete with ABORTED status.  Note that
624 	 *  we need to poll_threads() since I/O completed inline have their completions deferred.
625 	 */
626 	poll_threads();
627 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
628 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
629 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
630 
631 	/*
632 	 * Complete the reset
633 	 */
634 	set_thread(0);
635 	stub_complete_io(g_bdev.io_target, 0);
636 
637 	/*
638 	 * Only poll thread 0. We should not get a completion.
639 	 */
640 	poll_thread(0);
641 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
642 
643 	/*
644 	 * Poll both thread 0 and 1 so the messages can propagate and we
645 	 * get a completion.
646 	 */
647 	poll_threads();
648 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
649 
650 	spdk_put_io_channel(io_ch[0]);
651 	set_thread(1);
652 	spdk_put_io_channel(io_ch[1]);
653 	poll_threads();
654 
655 	teardown_test();
656 }
657 
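/*
 * Verify basic QoS behavior: I/O from any thread is funneled through the QoS
 * thread, aborts work while QoS is enabled, and the QoS channel is torn down
 * and re-created as descriptors and I/O channels come and go.
 */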
658 static void
659 basic_qos(void)
660 {
661 	struct spdk_io_channel *io_ch[2];
662 	struct spdk_bdev_channel *bdev_ch[2];
663 	struct spdk_bdev *bdev;
664 	enum spdk_bdev_io_status status, abort_status;
665 	int rc;
666 
667 	setup_test();
668 
669 	/* Enable QoS */
670 	bdev = &g_bdev.bdev;
671 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
672 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
673 	TAILQ_INIT(&bdev->internal.qos->queued);
674 	/*
675 	 * Enable read/write IOPS, read only byte per second and
676 	 * read/write byte per second rate limits.
677 	 * In this case, all rate limits will take equal effect.
678 	 */
679 	/* 2000 read/write I/O per second, or 2 per millisecond */
680 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
681 	/* 8K read/write byte per millisecond with 4K block size */
682 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
683 	/* 8K read only byte per millisecond with 4K block size */
684 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
685 
686 	g_get_io_channel = true;
687 
688 	set_thread(0);
689 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
690 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
691 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
692 
693 	set_thread(1);
694 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
695 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
696 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
697 
698 	/*
699 	 * Send an I/O on thread 0, which is where the QoS thread is running.
700 	 */
701 	set_thread(0);
702 	status = SPDK_BDEV_IO_STATUS_PENDING;
703 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
704 	CU_ASSERT(rc == 0);
705 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
706 	poll_threads();
707 	stub_complete_io(g_bdev.io_target, 0);
708 	poll_threads();
709 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
710 
711 	/* Send an I/O on thread 1. The QoS thread is not running here. */
712 	status = SPDK_BDEV_IO_STATUS_PENDING;
713 	set_thread(1);
714 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
715 	CU_ASSERT(rc == 0);
716 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
717 	poll_threads();
718 	/* Complete I/O on thread 1. This should not complete the I/O we submitted */
719 	stub_complete_io(g_bdev.io_target, 0);
720 	poll_threads();
721 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
722 	/* Now complete I/O on thread 0 */
723 	set_thread(0);
724 	poll_threads();
725 	stub_complete_io(g_bdev.io_target, 0);
726 	poll_threads();
727 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
728 
729 	/* Reset rate limit for the next test cases. */
730 	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
731 	poll_threads();
732 
733 	/*
734 	 * Test abort request when QoS is enabled.
735 	 */
736 
737 	/* Send an I/O on thread 0, which is where the QoS thread is running. */
738 	set_thread(0);
739 	status = SPDK_BDEV_IO_STATUS_PENDING;
740 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
741 	CU_ASSERT(rc == 0);
742 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
743 	/* Send an abort to the I/O on the same thread. */
744 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
745 	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
746 	CU_ASSERT(rc == 0);
747 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
748 	poll_threads();
749 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
750 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
751 
752 	/* Send an I/O on thread 1. The QoS thread is not running here. */
753 	status = SPDK_BDEV_IO_STATUS_PENDING;
754 	set_thread(1);
755 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
756 	CU_ASSERT(rc == 0);
757 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
758 	poll_threads();
759 	/* Send an abort to the I/O on the same thread. */
760 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
761 	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
762 	CU_ASSERT(rc == 0);
763 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
764 	poll_threads();
765 	/* The I/O should be aborted and the abort should complete with success on thread 1. */
766 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
767 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
768 
769 	set_thread(0);
770 
771 	/*
772 	 * Close the descriptor only, which should stop the qos channel since
773 	 * it is the last descriptor being closed.
774 	 */
775 	spdk_bdev_close(g_desc);
776 	poll_threads();
777 	CU_ASSERT(bdev->internal.qos->ch == NULL);
778 
779 	/*
780 	 * Open the bdev again, which should set up the qos channel since the
781 	 * I/O channels are still valid.
782 	 */
783 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
784 	poll_threads();
785 	CU_ASSERT(bdev->internal.qos->ch != NULL);
786 
787 	/* Tear down the channels */
788 	set_thread(0);
789 	spdk_put_io_channel(io_ch[0]);
790 	set_thread(1);
791 	spdk_put_io_channel(io_ch[1]);
792 	poll_threads();
793 	set_thread(0);
794 
795 	/* Close the descriptor, which should stop the qos channel */
796 	spdk_bdev_close(g_desc);
797 	poll_threads();
798 	CU_ASSERT(bdev->internal.qos->ch == NULL);
799 
800 	/* Open the bdev again, no qos channel setup without valid channels. */
801 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
802 	poll_threads();
803 	CU_ASSERT(bdev->internal.qos->ch == NULL);
804 
805 	/* Create the channels in reverse order. */
806 	set_thread(1);
807 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
808 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
809 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
810 
811 	set_thread(0);
812 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
813 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
814 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
815 
816 	/* Confirm that the qos thread is now thread 1 */
817 	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
818 
819 	/* Tear down the channels */
820 	set_thread(0);
821 	spdk_put_io_channel(io_ch[0]);
822 	set_thread(1);
823 	spdk_put_io_channel(io_ch[1]);
824 	poll_threads();
825 
826 	set_thread(0);
827 
828 	teardown_test();
829 }
830 
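/*
 * Verify that I/O exceeding the per-timeslice QoS allotment is queued and only
 * completes after time advances into the next timeslice.
 */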
831 static void
832 io_during_qos_queue(void)
833 {
834 	struct spdk_io_channel *io_ch[2];
835 	struct spdk_bdev_channel *bdev_ch[2];
836 	struct spdk_bdev *bdev;
837 	enum spdk_bdev_io_status status0, status1, status2;
838 	int rc;
839 
840 	setup_test();
841 	MOCK_SET(spdk_get_ticks, 0);
842 
843 	/* Enable QoS */
844 	bdev = &g_bdev.bdev;
845 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
846 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
847 	TAILQ_INIT(&bdev->internal.qos->queued);
848 	/*
849 	 * Enable read/write IOPS, read only byte per sec, write only
850 	 * byte per sec and read/write byte per sec rate limits.
851 	 * In this case, both read only and write only byte per sec
852 	 * rate limit will take effect.
853 	 */
854 	/* 4000 read/write I/O per second, or 4 per millisecond */
855 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
856 	/* 8K byte per millisecond with 4K block size */
857 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
858 	/* 4K byte per millisecond with 4K block size */
859 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
860 	/* 4K byte per millisecond with 4K block size */
861 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
862 
863 	g_get_io_channel = true;
864 
865 	/* Create channels */
866 	set_thread(0);
867 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
868 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
869 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
870 
871 	set_thread(1);
872 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
873 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
874 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
875 
876 	/* Send two read I/Os */
877 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
878 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
879 	CU_ASSERT(rc == 0);
880 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
881 	set_thread(0);
882 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
883 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
884 	CU_ASSERT(rc == 0);
885 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
886 	/* Send one write I/O */
887 	status2 = SPDK_BDEV_IO_STATUS_PENDING;
888 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
889 	CU_ASSERT(rc == 0);
890 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
891 
892 	/* Complete any I/O that arrived at the disk */
893 	poll_threads();
894 	set_thread(1);
895 	stub_complete_io(g_bdev.io_target, 0);
896 	set_thread(0);
897 	stub_complete_io(g_bdev.io_target, 0);
898 	poll_threads();
899 
900 	/* Only one of the two read I/Os should complete. (logical XOR) */
901 	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
902 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
903 	} else {
904 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
905 	}
906 	/* The write I/O should complete. */
907 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
908 
909 	/* Advance in time by a millisecond */
910 	spdk_delay_us(1000);
911 
912 	/* Complete more I/O */
913 	poll_threads();
914 	set_thread(1);
915 	stub_complete_io(g_bdev.io_target, 0);
916 	set_thread(0);
917 	stub_complete_io(g_bdev.io_target, 0);
918 	poll_threads();
919 
920 	/* Now the second read I/O should be done */
921 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
922 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
923 
924 	/* Tear down the channels */
925 	set_thread(1);
926 	spdk_put_io_channel(io_ch[1]);
927 	set_thread(0);
928 	spdk_put_io_channel(io_ch[0]);
929 	poll_threads();
930 
931 	teardown_test();
932 }
933 
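/* Verify that a reset aborts both QoS-queued I/O and I/O already sent to the disk. */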
934 static void
935 io_during_qos_reset(void)
936 {
937 	struct spdk_io_channel *io_ch[2];
938 	struct spdk_bdev_channel *bdev_ch[2];
939 	struct spdk_bdev *bdev;
940 	enum spdk_bdev_io_status status0, status1, reset_status;
941 	int rc;
942 
943 	setup_test();
944 	MOCK_SET(spdk_get_ticks, 0);
945 
946 	/* Enable QoS */
947 	bdev = &g_bdev.bdev;
948 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
949 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
950 	TAILQ_INIT(&bdev->internal.qos->queued);
951 	/*
952 	 * Enable read/write IOPS, write only byte per sec and
953 	 * read/write byte per second rate limits.
954 	 * In this case, read/write byte per second rate limit will
955 	 * take effect first.
956 	 */
957 	/* 2000 read/write I/O per second, or 2 per millisecond */
958 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
959 	/* 4K byte per millisecond with 4K block size */
960 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
961 	/* 8K byte per millisecond with 4K block size */
962 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
963 
964 	g_get_io_channel = true;
965 
966 	/* Create channels */
967 	set_thread(0);
968 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
969 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
970 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
971 
972 	set_thread(1);
973 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
974 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
975 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
976 
977 	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
978 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
979 	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
980 	CU_ASSERT(rc == 0);
981 	set_thread(0);
982 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
983 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
984 	CU_ASSERT(rc == 0);
985 
986 	poll_threads();
987 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
988 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
989 
990 	/* Reset the bdev. */
991 	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
992 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
993 	CU_ASSERT(rc == 0);
994 
995 	/* Complete any I/O that arrived at the disk */
996 	poll_threads();
997 	set_thread(1);
998 	stub_complete_io(g_bdev.io_target, 0);
999 	set_thread(0);
1000 	stub_complete_io(g_bdev.io_target, 0);
1001 	poll_threads();
1002 
1003 	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1004 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
1005 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
1006 
1007 	/* Tear down the channels */
1008 	set_thread(1);
1009 	spdk_put_io_channel(io_ch[1]);
1010 	set_thread(0);
1011 	spdk_put_io_channel(io_ch[0]);
1012 	poll_threads();
1013 
1014 	teardown_test();
1015 }
1016 
1017 static void
1018 enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1019 {
1020 	enum spdk_bdev_io_status *status = cb_arg;
1021 
1022 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
1023 	spdk_bdev_free_io(bdev_io);
1024 }
1025 
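/*
 * Verify the nomem_io queue: I/O beyond the channel's capacity is queued, is only
 * retried once enough completions reach nomem_threshold, and is flushed by a reset.
 */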
1026 static void
1027 enomem(void)
1028 {
1029 	struct spdk_io_channel *io_ch;
1030 	struct spdk_bdev_channel *bdev_ch;
1031 	struct spdk_bdev_shared_resource *shared_resource;
1032 	struct ut_bdev_channel *ut_ch;
1033 	const uint32_t IO_ARRAY_SIZE = 64;
1034 	const uint32_t AVAIL = 20;
1035 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
1036 	uint32_t nomem_cnt, i;
1037 	struct spdk_bdev_io *first_io;
1038 	int rc;
1039 
1040 	setup_test();
1041 
1042 	set_thread(0);
1043 	io_ch = spdk_bdev_get_io_channel(g_desc);
1044 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1045 	shared_resource = bdev_ch->shared_resource;
1046 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1047 	ut_ch->avail_cnt = AVAIL;
1048 
1049 	/* First submit a number of IOs equal to what the channel can support. */
1050 	for (i = 0; i < AVAIL; i++) {
1051 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1052 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1053 		CU_ASSERT(rc == 0);
1054 	}
1055 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1056 
1057 	/*
1058 	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
1059 	 *  the nomem_io list.
1060 	 */
1061 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1062 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1063 	CU_ASSERT(rc == 0);
1064 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1065 	first_io = TAILQ_FIRST(&shared_resource->nomem_io);
1066 
1067 	/*
1068 	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
1069 	 *  the first_io above.
1070 	 */
1071 	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
1072 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1073 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1074 		CU_ASSERT(rc == 0);
1075 	}
1076 
1077 	/* Assert that first_io is still at the head of the list. */
1078 	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
1079 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
1080 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1081 	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
1082 
1083 	/*
1084 	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
1085 	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
1086 	 *  list.
1087 	 */
1088 	stub_complete_io(g_bdev.io_target, 1);
1089 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1090 
1091 	/*
1092 	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
1093 	 *  and we should see I/O get resubmitted to the test bdev module.
1094 	 */
1095 	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
1096 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
1097 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1098 
1099 	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
1100 	stub_complete_io(g_bdev.io_target, 1);
1101 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
1102 
1103 	/*
1104 	 * Send a reset and confirm that all I/O are completed, including the ones that
1105 	 *  were queued on the nomem_io list.
1106 	 */
1107 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
1108 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
1109 	poll_threads();
1110 	CU_ASSERT(rc == 0);
1111 	/* This will complete the reset. */
1112 	stub_complete_io(g_bdev.io_target, 0);
1113 
1114 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
1115 	CU_ASSERT(shared_resource->io_outstanding == 0);
1116 
1117 	spdk_put_io_channel(io_ch);
1118 	poll_threads();
1119 	teardown_test();
1120 }
1121 
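/*
 * Two bdevs sharing one io_target share a shared_resource, so completions on one
 * bdev trigger retries of nomem_io queued by the other.
 */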
1122 static void
1123 enomem_multi_bdev(void)
1124 {
1125 	struct spdk_io_channel *io_ch;
1126 	struct spdk_bdev_channel *bdev_ch;
1127 	struct spdk_bdev_shared_resource *shared_resource;
1128 	struct ut_bdev_channel *ut_ch;
1129 	const uint32_t IO_ARRAY_SIZE = 64;
1130 	const uint32_t AVAIL = 20;
1131 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1132 	uint32_t i;
1133 	struct ut_bdev *second_bdev;
1134 	struct spdk_bdev_desc *second_desc = NULL;
1135 	struct spdk_bdev_channel *second_bdev_ch;
1136 	struct spdk_io_channel *second_ch;
1137 	int rc;
1138 
1139 	setup_test();
1140 
1141 	/* Register second bdev with the same io_target */
1142 	second_bdev = calloc(1, sizeof(*second_bdev));
1143 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1144 	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
1145 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1146 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1147 
1148 	set_thread(0);
1149 	io_ch = spdk_bdev_get_io_channel(g_desc);
1150 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1151 	shared_resource = bdev_ch->shared_resource;
1152 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1153 	ut_ch->avail_cnt = AVAIL;
1154 
1155 	second_ch = spdk_bdev_get_io_channel(second_desc);
1156 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1157 	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
1158 
1159 	/* Saturate io_target through bdev A. */
1160 	for (i = 0; i < AVAIL; i++) {
1161 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1162 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1163 		CU_ASSERT(rc == 0);
1164 	}
1165 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1166 
1167 	/*
1168 	 * Now submit I/O through the second bdev. This should fail with ENOMEM
1169 	 * and then go onto the nomem_io list.
1170 	 */
1171 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1172 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1173 	CU_ASSERT(rc == 0);
1174 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1175 
1176 	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
1177 	stub_complete_io(g_bdev.io_target, AVAIL);
1178 
1179 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1180 	CU_ASSERT(shared_resource->io_outstanding == 1);
1181 
1182 	/* Now complete our retried I/O  */
1183 	stub_complete_io(g_bdev.io_target, 1);
1184 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1185 
1186 	spdk_put_io_channel(io_ch);
1187 	spdk_put_io_channel(second_ch);
1188 	spdk_bdev_close(second_desc);
1189 	unregister_bdev(second_bdev);
1190 	poll_threads();
1191 	free(second_bdev);
1192 	teardown_test();
1193 }
1194 
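/* Verify that unregistering a bdev fails any I/O still sitting on its nomem_io queue. */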
1195 static void
1196 enomem_multi_bdev_unregister(void)
1197 {
1198 	struct spdk_io_channel *io_ch;
1199 	struct spdk_bdev_channel *bdev_ch;
1200 	struct spdk_bdev_shared_resource *shared_resource;
1201 	struct ut_bdev_channel *ut_ch;
1202 	const uint32_t IO_ARRAY_SIZE = 64;
1203 	const uint32_t AVAIL = 20;
1204 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1205 	uint32_t i;
1206 	int rc;
1207 
1208 	setup_test();
1209 
1210 	set_thread(0);
1211 	io_ch = spdk_bdev_get_io_channel(g_desc);
1212 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1213 	shared_resource = bdev_ch->shared_resource;
1214 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1215 	ut_ch->avail_cnt = AVAIL;
1216 
1217 	/* Saturate io_target through the bdev. */
1218 	for (i = 0; i < AVAIL; i++) {
1219 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1220 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1221 		CU_ASSERT(rc == 0);
1222 	}
1223 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1224 
1225 	/*
1226 	 * Now submit I/O through the bdev. This should fail with ENOMEM
1227 	 * and then go onto the nomem_io list.
1228 	 */
1229 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1230 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1231 	CU_ASSERT(rc == 0);
1232 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1233 
1234 	/* Unregister the bdev to abort the I/Os sitting on the nomem_io queue. */
1235 	unregister_bdev(&g_bdev);
1236 	CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED);
1237 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1238 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL);
1239 
1240 	/* Complete the bdev's I/O. */
1241 	stub_complete_io(g_bdev.io_target, AVAIL);
1242 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1243 
1244 	spdk_put_io_channel(io_ch);
1245 	poll_threads();
1246 	teardown_test();
1247 }
1248 
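/*
 * Bdevs backed by different io_targets get separate shared_resources, so one bdev
 * hitting ENOMEM does not block I/O submitted through the other.
 */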
1249 static void
1250 enomem_multi_io_target(void)
1251 {
1252 	struct spdk_io_channel *io_ch;
1253 	struct spdk_bdev_channel *bdev_ch;
1254 	struct ut_bdev_channel *ut_ch;
1255 	const uint32_t IO_ARRAY_SIZE = 64;
1256 	const uint32_t AVAIL = 20;
1257 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1258 	uint32_t i;
1259 	int new_io_device;
1260 	struct ut_bdev *second_bdev;
1261 	struct spdk_bdev_desc *second_desc = NULL;
1262 	struct spdk_bdev_channel *second_bdev_ch;
1263 	struct spdk_io_channel *second_ch;
1264 	int rc;
1265 
1266 	setup_test();
1267 
1268 	/* Create new io_target and a second bdev using it */
1269 	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1270 				sizeof(struct ut_bdev_channel), NULL);
1271 	second_bdev = calloc(1, sizeof(*second_bdev));
1272 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1273 	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
1274 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
1275 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1276 
1277 	set_thread(0);
1278 	io_ch = spdk_bdev_get_io_channel(g_desc);
1279 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1280 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1281 	ut_ch->avail_cnt = AVAIL;
1282 
1283 	/* Different io_target should imply a different shared_resource */
1284 	second_ch = spdk_bdev_get_io_channel(second_desc);
1285 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1286 	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1287 
1288 	/* Saturate io_target through bdev A. */
1289 	for (i = 0; i < AVAIL; i++) {
1290 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1291 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1292 		CU_ASSERT(rc == 0);
1293 	}
1294 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1295 
1296 	/* Issue one more I/O to fill the nomem_io list. */
1297 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1298 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1299 	CU_ASSERT(rc == 0);
1300 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1301 
1302 	/*
1303 	 * Now submit I/O through the second bdev. This should go through and complete
1304 	 * successfully because we're using a different io_device underneath.
1305 	 */
1306 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1307 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1308 	CU_ASSERT(rc == 0);
1309 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1310 	stub_complete_io(second_bdev->io_target, 1);
1311 
1312 	/* Cleanup; Complete outstanding I/O. */
1313 	stub_complete_io(g_bdev.io_target, AVAIL);
1314 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1315 	/* Complete the ENOMEM I/O */
1316 	stub_complete_io(g_bdev.io_target, 1);
1317 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1318 
1319 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1320 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1321 	spdk_put_io_channel(io_ch);
1322 	spdk_put_io_channel(second_ch);
1323 	spdk_bdev_close(second_desc);
1324 	unregister_bdev(second_bdev);
1325 	spdk_io_device_unregister(&new_io_device, NULL);
1326 	poll_threads();
1327 	free(second_bdev);
1328 	teardown_test();
1329 }
1330 
1331 static void
1332 qos_dynamic_enable_done(void *cb_arg, int status)
1333 {
1334 	int *rc = cb_arg;
1335 	*rc = status;
1336 }
1337 
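/*
 * Verify enabling and disabling QoS at runtime via spdk_bdev_set_qos_rate_limits():
 * queued I/O is resubmitted on its original thread (not aborted) when QoS is
 * disabled, and an enable that overlaps a pending disable is rejected.
 */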
1338 static void
1339 qos_dynamic_enable(void)
1340 {
1341 	struct spdk_io_channel *io_ch[2];
1342 	struct spdk_bdev_channel *bdev_ch[2];
1343 	struct spdk_bdev *bdev;
1344 	enum spdk_bdev_io_status bdev_io_status[2];
1345 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
1346 	int status, second_status, rc, i;
1347 
1348 	setup_test();
1349 	MOCK_SET(spdk_get_ticks, 0);
1350 
1351 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
1352 		limits[i] = UINT64_MAX;
1353 	}
1354 
1355 	bdev = &g_bdev.bdev;
1356 
1357 	g_get_io_channel = true;
1358 
1359 	/* Create channels */
1360 	set_thread(0);
1361 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1362 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1363 	CU_ASSERT(bdev_ch[0]->flags == 0);
1364 
1365 	set_thread(1);
1366 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1367 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1368 	CU_ASSERT(bdev_ch[1]->flags == 0);
1369 
1370 	set_thread(0);
1371 
1372 	/*
1373 	 * Enable QoS: Read/Write IOPS, Read/Write byte,
1374 	 * Read only byte and Write only byte per second
1375 	 * rate limits.
1376 	 * More than 10 I/Os allowed per timeslice.
1377 	 */
1378 	status = -1;
1379 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1380 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
1381 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
1382 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
1383 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1384 	poll_threads();
1385 	CU_ASSERT(status == 0);
1386 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1387 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1388 
1389 	/*
1390 	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
1391 	 * Additional I/O will then be queued.
1392 	 */
1393 	set_thread(0);
1394 	for (i = 0; i < 10; i++) {
1395 		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1396 		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1397 		CU_ASSERT(rc == 0);
1398 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1399 		poll_thread(0);
1400 		stub_complete_io(g_bdev.io_target, 0);
1401 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1402 	}
1403 
1404 	/*
1405 	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
1406 	 * filled already.  We want to test that when QoS is disabled, these two I/O:
1407 	 *  1) are not aborted
1408 	 *  2) are sent back to their original thread for resubmission
1409 	 */
1410 	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1411 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1412 	CU_ASSERT(rc == 0);
1413 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1414 	set_thread(1);
1415 	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
1416 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
1417 	CU_ASSERT(rc == 0);
1418 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1419 	poll_threads();
1420 
1421 	/*
1422 	 * Disable QoS: Read/Write IOPS, Read/Write byte,
1423 	 * Read only byte rate limits
1424 	 */
1425 	status = -1;
1426 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1427 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
1428 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
1429 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1430 	poll_threads();
1431 	CU_ASSERT(status == 0);
1432 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1433 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1434 
1435 	/* Disable QoS: Write only Byte per second rate limit */
1436 	status = -1;
1437 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
1438 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1439 	poll_threads();
1440 	CU_ASSERT(status == 0);
1441 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1442 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1443 
1444 	/*
1445 	 * All I/O should have been resubmitted back on their original thread.  Complete
1446 	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
1447 	 */
1448 	set_thread(0);
1449 	stub_complete_io(g_bdev.io_target, 0);
1450 	poll_threads();
1451 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1452 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1453 
1454 	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
1455 	set_thread(1);
1456 	stub_complete_io(g_bdev.io_target, 0);
1457 	poll_threads();
1458 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
1459 
1460 	/* Disable QoS again */
1461 	status = -1;
1462 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1463 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1464 	poll_threads();
1465 	CU_ASSERT(status == 0); /* This should succeed */
1466 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1467 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1468 
1469 	/* Enable QoS on thread 0 */
1470 	status = -1;
1471 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1472 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1473 	poll_threads();
1474 	CU_ASSERT(status == 0);
1475 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1476 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1477 
1478 	/* Disable QoS on thread 1 */
1479 	set_thread(1);
1480 	status = -1;
1481 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1482 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1483 	/* Don't poll yet. This should leave the channels with QoS enabled */
1484 	CU_ASSERT(status == -1);
1485 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1486 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1487 
1488 	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
1489 	second_status = 0;
1490 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
1491 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
1492 	poll_threads();
1493 	CU_ASSERT(status == 0); /* The disable should succeed */
1494 	CU_ASSERT(second_status < 0); /* The enable should fail */
1495 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1496 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1497 
1498 	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
1499 	status = -1;
1500 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
1501 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1502 	poll_threads();
1503 	CU_ASSERT(status == 0);
1504 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1505 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1506 
1507 	/* Tear down the channels */
1508 	set_thread(0);
1509 	spdk_put_io_channel(io_ch[0]);
1510 	set_thread(1);
1511 	spdk_put_io_channel(io_ch[1]);
1512 	poll_threads();
1513 
1514 	set_thread(0);
1515 	teardown_test();
1516 }
1517 
1518 static void
1519 histogram_status_cb(void *cb_arg, int status)
1520 {
1521 	g_status = status;
1522 }
1523 
1524 static void
1525 histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
1526 {
1527 	g_status = status;
1528 	g_histogram = histogram;
1529 }
1530 
1531 static void
1532 histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
1533 		   uint64_t total, uint64_t so_far)
1534 {
1535 	g_count += count;
1536 }
1537 
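/* Verify that the I/O histogram aggregates data from channels on multiple threads. */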
1538 static void
1539 bdev_histograms_mt(void)
1540 {
1541 	struct spdk_io_channel *ch[2];
1542 	struct spdk_histogram_data *histogram;
1543 	uint8_t buf[4096];
1544 	int status = false;
1545 	int rc;
1546 
1547 
1548 	setup_test();
1549 
1550 	set_thread(0);
1551 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1552 	CU_ASSERT(ch[0] != NULL);
1553 
1554 	set_thread(1);
1555 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1556 	CU_ASSERT(ch[1] != NULL);
1557 
1558 
1559 	/* Enable histogram */
1560 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
1561 	poll_threads();
1562 	CU_ASSERT(g_status == 0);
1563 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1564 
1565 	/* Allocate histogram */
1566 	histogram = spdk_histogram_data_alloc();
1567 
1568 	/* Check if histogram is zeroed */
1569 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1570 	poll_threads();
1571 	CU_ASSERT(g_status == 0);
1572 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1573 
1574 	g_count = 0;
1575 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1576 
1577 	CU_ASSERT(g_count == 0);
1578 
1579 	set_thread(0);
1580 	rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
1581 	CU_ASSERT(rc == 0);
1582 
1583 	spdk_delay_us(10);
1584 	stub_complete_io(g_bdev.io_target, 1);
1585 	poll_threads();
1586 	CU_ASSERT(status == true);
1587 
1588 
1589 	set_thread(1);
1590 	rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
1591 	CU_ASSERT(rc == 0);
1592 
1593 	spdk_delay_us(10);
1594 	stub_complete_io(g_bdev.io_target, 1);
1595 	poll_threads();
1596 	CU_ASSERT(status == true);
1597 
1598 	set_thread(0);
1599 
1600 	/* Check if histogram gathered data from all I/O channels */
1601 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
1602 	poll_threads();
1603 	CU_ASSERT(g_status == 0);
1604 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
1605 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
1606 
1607 	g_count = 0;
1608 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
1609 	CU_ASSERT(g_count == 2);
1610 
1611 	/* Disable histogram */
1612 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
1613 	poll_threads();
1614 	CU_ASSERT(g_status == 0);
1615 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
1616 
1617 	spdk_histogram_data_free(histogram);
1618 
1619 	/* Tear down the channels */
1620 	set_thread(0);
1621 	spdk_put_io_channel(ch[0]);
1622 	set_thread(1);
1623 	spdk_put_io_channel(ch[1]);
1624 	poll_threads();
1625 	set_thread(0);
1626 	teardown_test();
1627 
1628 }
1629 
1630 struct timeout_io_cb_arg {
1631 	struct iovec iov;
1632 	uint8_t type;
1633 };
1634 
1635 static int
1636 bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
1637 {
1638 	struct spdk_bdev_io *bdev_io;
1639 	int n = 0;
1640 
1641 	if (!ch) {
1642 		return -1;
1643 	}
1644 
1645 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
1646 		n++;
1647 	}
1648 
1649 	return n;
1650 }
1651 
1652 static void
1653 bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
1654 {
1655 	struct timeout_io_cb_arg *ctx = cb_arg;
1656 
1657 	ctx->type = bdev_io->type;
1658 	ctx->iov.iov_base = bdev_io->iov.iov_base;
1659 	ctx->iov.iov_len = bdev_io->iov.iov_len;
1660 }
1661 
1662 static bool g_io_done;
1663 
1664 static void
1665 io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1666 {
1667 	g_io_done = true;
1668 	spdk_bdev_free_io(bdev_io);
1669 }
1670 
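/*
 * Verify spdk_bdev_set_timeout() in multi-thread mode: the timeout callback reports
 * the parent I/O submitted by the user (not split children) on each channel's thread,
 * and the descriptor can be closed while the timeout poller is running.
 */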
1671 static void
1672 bdev_set_io_timeout_mt(void)
1673 {
1674 	struct spdk_io_channel *ch[3];
1675 	struct spdk_bdev_channel *bdev_ch[3];
1676 	struct timeout_io_cb_arg cb_arg;
1677 
1678 	setup_test();
1679 
1680 	g_bdev.bdev.optimal_io_boundary = 16;
1681 	g_bdev.bdev.split_on_optimal_io_boundary = true;
1682 
1683 	set_thread(0);
1684 	ch[0] = spdk_bdev_get_io_channel(g_desc);
1685 	CU_ASSERT(ch[0] != NULL);
1686 
1687 	set_thread(1);
1688 	ch[1] = spdk_bdev_get_io_channel(g_desc);
1689 	CU_ASSERT(ch[1] != NULL);
1690 
1691 	set_thread(2);
1692 	ch[2] = spdk_bdev_get_io_channel(g_desc);
1693 	CU_ASSERT(ch[2] != NULL);
1694 
1695 	/* Multi-thread mode
1696 	 * 1. Check that the timeout poller was registered successfully.
1697 	 * 2. Check the timed-out I/O and ensure it is the I/O submitted by the user.
1698 	 * 3. Check that the io_submitted list in the bdev channel is linked correctly.
1699 	 * 4. Close the desc and put the io channel while the timeout poller is polling.
1700 	 */
1701 
1702 	/* Set the timeout on the thread that owns the desc */
1703 	set_thread(0);
1704 	CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
1705 	CU_ASSERT(g_desc->io_timeout_poller != NULL);
1706 	CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
1707 	CU_ASSERT(g_desc->cb_arg == &cb_arg);
1708 
1709 	/* Check the io_submitted list and the timeout handler */
1710 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
1711 	bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
1712 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
1713 
1714 	set_thread(1);
1715 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
1716 	bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
1717 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
1718 
1719 	/* Now test that a single-vector command is split correctly.
1720 	 * Offset 14, length 8, payload 0xF000
1721 	 *  Child - Offset 14, length 2, payload 0xF000
1722 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
1723 	 *
1724 	 * The parent I/O and both children are linked on the channel's io_submitted list.
1725 	 */
1726 	set_thread(2);
1727 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
1728 	bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
1729 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
1730 
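	/* First advance time by only 3 seconds, still below the 5 second timeout, so no I/O should be reported as timed out yet. */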
1731 	set_thread(0);
1732 	memset(&cb_arg, 0, sizeof(cb_arg));
1733 	spdk_delay_us(3 * spdk_get_ticks_hz());
1734 	poll_threads();
1735 	CU_ASSERT(cb_arg.type == 0);
1736 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
1737 	CU_ASSERT(cb_arg.iov.iov_len == 0);
1738 
1739 	/* Now the elapsed time reaches the timeout limit */
1740 	spdk_delay_us(3 * spdk_get_ticks_hz());
1741 	poll_thread(0);
1742 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
1743 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
1744 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
1745 	stub_complete_io(g_bdev.io_target, 1);
1746 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
1747 
1748 	memset(&cb_arg, 0, sizeof(cb_arg));
1749 	set_thread(1);
1750 	poll_thread(1);
1751 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
1752 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
1753 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
1754 	stub_complete_io(g_bdev.io_target, 1);
1755 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
1756 
1757 	memset(&cb_arg, 0, sizeof(cb_arg));
1758 	set_thread(2);
1759 	poll_thread(2);
1760 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
1761 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
1762 	CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
1763 	stub_complete_io(g_bdev.io_target, 1);
1764 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
1765 	stub_complete_io(g_bdev.io_target, 1);
1766 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
1767 
1768 	/* Run poll_timeout_done() to complete this round of the timeout poller */
1769 	set_thread(0);
1770 	poll_thread(0);
1771 	CU_ASSERT(g_desc->refs == 0);
1772 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
1773 	set_thread(1);
1774 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
1775 	set_thread(2);
1776 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
1777 
1778 	/* Trigger the timeout poller to run again; desc->refs is incremented.
1779 	 * On thread 0 we destroy the io channel before the timeout poller runs,
1780 	 * so the timeout callback is not called on thread 0.
1781 	 */
1782 	spdk_delay_us(6 * spdk_get_ticks_hz());
1783 	memset(&cb_arg, 0, sizeof(cb_arg));
1784 	set_thread(0);
1785 	stub_complete_io(g_bdev.io_target, 1);
1786 	spdk_put_io_channel(ch[0]);
1787 	poll_thread(0);
1788 	CU_ASSERT(g_desc->refs == 1);
1789 	CU_ASSERT(cb_arg.type == 0);
1790 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
1791 	CU_ASSERT(cb_arg.iov.iov_len == 0);
1792 
1793 	/* On thread 1 the timeout poller runs before we destroy the io channel,
1794 	 * so the timeout callback is called on thread 1.
1795 	 */
1796 	memset(&cb_arg, 0, sizeof(cb_arg));
1797 	set_thread(1);
1798 	poll_thread(1);
1799 	stub_complete_io(g_bdev.io_target, 1);
1800 	spdk_put_io_channel(ch[1]);
1801 	poll_thread(1);
1802 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
1803 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
1804 	CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
1805 
1806 	/* Close the desc.
1807 	 * This unregisters the timeout poller first and then decrements desc->refs,
1808 	 * but the refcount is not zero yet, so the desc is not freed.
1809 	 */
1810 	set_thread(0);
1811 	spdk_bdev_close(g_desc);
1812 	CU_ASSERT(g_desc->refs == 1);
1813 	CU_ASSERT(g_desc->io_timeout_poller == NULL);
1814 
1815 	/* The timeout poller runs on thread 2 before we destroy the io channel.
1816 	 * The desc is already closed, so the timeout poller exits immediately and
1817 	 * the timeout callback is not called on thread 2.
1818 	 */
1819 	memset(&cb_arg, 0, sizeof(cb_arg));
1820 	set_thread(2);
1821 	poll_thread(2);
1822 	stub_complete_io(g_bdev.io_target, 1);
1823 	spdk_put_io_channel(ch[2]);
1824 	poll_thread(2);
1825 	CU_ASSERT(cb_arg.type == 0);
1826 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
1827 	CU_ASSERT(cb_arg.iov.iov_len == 0);
1828 
1829 	set_thread(0);
1830 	poll_thread(0);
1831 	g_teardown_done = false;
1832 	unregister_bdev(&g_bdev);
1833 	spdk_io_device_unregister(&g_io_device, NULL);
1834 	spdk_bdev_finish(finish_cb, NULL);
1835 	poll_threads();
1836 	memset(&g_bdev, 0, sizeof(g_bdev));
1837 	CU_ASSERT(g_teardown_done == true);
1838 	g_teardown_done = false;
1839 	free_threads();
1840 	free_cores();
1841 }
1842 
1843 static bool g_io_done2;
1844 static bool g_lock_lba_range_done;
1845 static bool g_unlock_lba_range_done;
1846 
1847 static void
1848 io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
1849 {
1850 	g_io_done2 = true;
1851 	spdk_bdev_free_io(bdev_io);
1852 }
1853 
1854 static void
1855 lock_lba_range_done(void *ctx, int status)
1856 {
1857 	g_lock_lba_range_done = true;
1858 }
1859 
1860 static void
1861 unlock_lba_range_done(void *ctx, int status)
1862 {
1863 	g_unlock_lba_range_done = true;
1864 }
1865 
1866 static uint32_t
1867 stub_channel_outstanding_cnt(void *io_target)
1868 {
1869 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
1870 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
1871 	uint32_t outstanding_cnt;
1872 
1873 	outstanding_cnt = ch->outstanding_cnt;
1874 
1875 	spdk_put_io_channel(_ch);
1876 	return outstanding_cnt;
1877 }
1878 
1879 static void
1880 lock_lba_range_then_submit_io(void)
1881 {
1882 	struct spdk_bdev_desc *desc = NULL;
1883 	void *io_target;
1884 	struct spdk_io_channel *io_ch[3];
1885 	struct spdk_bdev_channel *bdev_ch[3];
1886 	struct lba_range *range;
1887 	char buf[4096];
1888 	int ctx0, ctx1, ctx2;
1889 	int rc;
1890 
1891 	setup_test();
1892 
1893 	io_target = g_bdev.io_target;
1894 	desc = g_desc;
1895 
1896 	set_thread(0);
1897 	io_ch[0] = spdk_bdev_get_io_channel(desc);
1898 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1899 	CU_ASSERT(io_ch[0] != NULL);
1900 
1901 	set_thread(1);
1902 	io_ch[1] = spdk_bdev_get_io_channel(desc);
1903 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1904 	CU_ASSERT(io_ch[1] != NULL);
1905 
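	/* Lock LBA range [20, 30) from thread 0's channel. */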
1906 	set_thread(0);
1907 	g_lock_lba_range_done = false;
1908 	rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
1909 	CU_ASSERT(rc == 0);
1910 	poll_threads();
1911 
1912 	/* The lock should immediately become valid, since there are no outstanding
1913 	 * write I/O.
1914 	 */
1915 	CU_ASSERT(g_lock_lba_range_done == true);
1916 	range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
1917 	SPDK_CU_ASSERT_FATAL(range != NULL);
1918 	CU_ASSERT(range->offset == 20);
1919 	CU_ASSERT(range->length == 10);
1920 	CU_ASSERT(range->owner_ch == bdev_ch[0]);
1921 
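	/* Submit a read I/O overlapping the locked range from thread 0's channel;
	 * it should execute immediately rather than being queued on io_locked.
	 */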
1922 	g_io_done = false;
1923 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
1924 	rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
1925 	CU_ASSERT(rc == 0);
1926 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
1927 
1928 	stub_complete_io(io_target, 1);
1929 	poll_threads();
1930 	CU_ASSERT(g_io_done == true);
1931 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
1932 
1933 	/* Try a write I/O.  This should actually be allowed to execute, since the channel
1934 	 * holding the lock is submitting the write I/O.
1935 	 */
1936 	g_io_done = false;
1937 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
1938 	rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
1939 	CU_ASSERT(rc == 0);
1940 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
1941 
1942 	stub_complete_io(io_target, 1);
1943 	poll_threads();
1944 	CU_ASSERT(g_io_done == true);
1945 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
1946 
1947 	/* Try a write I/O.  This should get queued in the io_locked tailq. */
1948 	set_thread(1);
1949 	g_io_done = false;
1950 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
1951 	rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
1952 	CU_ASSERT(rc == 0);
1953 	poll_threads();
1954 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
1955 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
1956 	CU_ASSERT(g_io_done == false);
1957 
1958 	/* Try to unlock the lba range using thread 1's io_ch.  This should fail, since only the channel that locked the range can unlock it. */
1959 	rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
1960 	CU_ASSERT(rc == -EINVAL);
1961 
1962 	/* Now create a new channel and submit a write I/O with it.  This should also be queued.
1963 	 * The new channel should inherit the active locks from the bdev's internal list.
1964 	 */
1965 	set_thread(2);
1966 	io_ch[2] = spdk_bdev_get_io_channel(desc);
1967 	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
1968 	CU_ASSERT(io_ch[2] != NULL);
1969 
1970 	g_io_done2 = false;
1971 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
1972 	rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
1973 	CU_ASSERT(rc == 0);
1974 	poll_threads();
1975 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
1976 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
1977 	CU_ASSERT(g_io_done2 == false);
1978 
1979 	set_thread(0);
1980 	rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
1981 	CU_ASSERT(rc == 0);
1982 	poll_threads();
1983 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
1984 
1985 	/* The LBA range is unlocked, so the write IOs should now have started execution. */
1986 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
1987 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
1988 
1989 	set_thread(1);
1990 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
1991 	stub_complete_io(io_target, 1);
1992 	set_thread(2);
1993 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
1994 	stub_complete_io(io_target, 1);
1995 
1996 	poll_threads();
1997 	CU_ASSERT(g_io_done == true);
1998 	CU_ASSERT(g_io_done2 == true);
1999 
2000 	/* Tear down the channels */
2001 	set_thread(0);
2002 	spdk_put_io_channel(io_ch[0]);
2003 	set_thread(1);
2004 	spdk_put_io_channel(io_ch[1]);
2005 	set_thread(2);
2006 	spdk_put_io_channel(io_ch[2]);
2007 	poll_threads();
2008 	set_thread(0);
2009 	teardown_test();
2010 }
2011 
2012 /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel().
2013  * spdk_bdev_unregister() calls spdk_io_device_unregister() at the end. However,
2014  * spdk_io_device_unregister() fails if it is called while spdk_for_each_channel() is executing.
2015  * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset()
2016  * completes. Test this behavior.
2017  */
2018 static void
2019 unregister_during_reset(void)
2020 {
2021 	struct spdk_io_channel *io_ch[2];
2022 	bool done_reset = false, done_unregister = false;
2023 	int rc;
2024 
2025 	setup_test();
2026 	set_thread(0);
2027 
2028 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
2029 	SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL);
2030 
2031 	set_thread(1);
2032 
2033 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
2034 	SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL);
2035 
2036 	set_thread(0);
2037 
2038 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2039 
2040 	rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset);
2041 	CU_ASSERT(rc == 0);
2042 
2043 	set_thread(0);
2044 
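	/* Poll thread 0 only once so that the reset's channel freeze (spdk_for_each_channel())
	 * is in progress but not yet complete when the bdev is unregistered below.
	 */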
2045 	poll_thread_times(0, 1);
2046 
2047 	spdk_bdev_close(g_desc);
2048 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister);
2049 
2050 	CU_ASSERT(done_reset == false);
2051 	CU_ASSERT(done_unregister == false);
2052 
2053 	poll_threads();
2054 
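	/* Complete the outstanding I/O in the stub so the reset can finish. */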
2055 	stub_complete_io(g_bdev.io_target, 0);
2056 
2057 	poll_threads();
2058 
2059 	CU_ASSERT(done_reset == true);
2060 	CU_ASSERT(done_unregister == false);
2061 
2062 	spdk_put_io_channel(io_ch[0]);
2063 
2064 	set_thread(1);
2065 
2066 	spdk_put_io_channel(io_ch[1]);
2067 
2068 	poll_threads();
2069 
2070 	CU_ASSERT(done_unregister == true);
2071 
2072 	/* Restore the original g_bdev so that we can use teardown_test(). */
2073 	set_thread(0);
2074 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
2075 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2076 	teardown_test();
2077 }
2078 
2079 int
2080 main(int argc, char **argv)
2081 {
2082 	CU_pSuite	suite = NULL;
2083 	unsigned int	num_failures;
2084 
2085 	CU_set_error_action(CUEA_ABORT);
2086 	CU_initialize_registry();
2087 
2088 	suite = CU_add_suite("bdev", NULL, NULL);
2089 
2090 	CU_ADD_TEST(suite, basic);
2091 	CU_ADD_TEST(suite, unregister_and_close);
2092 	CU_ADD_TEST(suite, basic_qos);
2093 	CU_ADD_TEST(suite, put_channel_during_reset);
2094 	CU_ADD_TEST(suite, aborted_reset);
2095 	CU_ADD_TEST(suite, io_during_reset);
2096 	CU_ADD_TEST(suite, io_during_qos_queue);
2097 	CU_ADD_TEST(suite, io_during_qos_reset);
2098 	CU_ADD_TEST(suite, enomem);
2099 	CU_ADD_TEST(suite, enomem_multi_bdev);
2100 	CU_ADD_TEST(suite, enomem_multi_bdev_unregister);
2101 	CU_ADD_TEST(suite, enomem_multi_io_target);
2102 	CU_ADD_TEST(suite, qos_dynamic_enable);
2103 	CU_ADD_TEST(suite, bdev_histograms_mt);
2104 	CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
2105 	CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
2106 	CU_ADD_TEST(suite, unregister_during_reset);
2107 
2108 	CU_basic_set_mode(CU_BRM_VERBOSE);
2109 	CU_basic_run_tests();
2110 	num_failures = CU_get_number_of_failures();
2111 	CU_cleanup_registry();
2112 	return num_failures;
2113 }
2114