/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk_internal/cunit.h"

#include "common/lib/ut_multithread.c"
#include "unit/lib/json_mock.c"

#include "spdk/config.h"
/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#include "common/lib/bdev/common_stubs.h"

#define BDEV_UT_NUM_THREADS 3
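
/*
 * The tests below drive BDEV_UT_NUM_THREADS simulated threads through the
 * helpers pulled in from common/lib/ut_multithread.c.  A minimal sketch of
 * the pattern (illustrative only, not a test in this file):
 *
 *	allocate_threads(BDEV_UT_NUM_THREADS);
 *	set_thread(0);			(make thread 0 the current thread)
 *	...submit I/O, register bdevs, etc...
 *	poll_threads();			(run all queued messages and pollers)
 *	free_threads();
 */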

DEFINE_RETURN_MOCK(spdk_memory_domain_pull_data, int);
int
spdk_memory_domain_pull_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov, uint32_t dst_iov_cnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	HANDLE_RETURN_MOCK(spdk_memory_domain_pull_data);

	cpl_cb(cpl_cb_arg, 0);
	return 0;
}

DEFINE_RETURN_MOCK(spdk_memory_domain_push_data, int);
int
spdk_memory_domain_push_data(struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *dst_iov, uint32_t dst_iovcnt, struct iovec *src_iov, uint32_t src_iovcnt,
			     spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg)
{
	HANDLE_RETURN_MOCK(spdk_memory_domain_push_data);

	cpl_cb(cpl_cb_arg, 0);
	return 0;
}
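
/*
 * Both memory domain mocks complete successfully by default.  A test that
 * needs a failure path can override the return value with the MOCK_SET()/
 * MOCK_CLEAR() helpers from spdk_internal/mock.h; HANDLE_RETURN_MOCK() then
 * returns the stubbed value without invoking cpl_cb.  Illustrative sketch:
 *
 *	MOCK_SET(spdk_memory_domain_pull_data, -ENOMEM);
 *	...submit an I/O that requires a pull; it now fails synchronously...
 *	MOCK_CLEAR(spdk_memory_domain_pull_data);
 */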

static int g_accel_io_device;

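/*
 * bdev.c grabs an accel framework channel for every bdev channel it creates;
 * stand in for the real accel framework with a dummy io_device so that
 * spdk_accel_get_io_channel() hands back a usable channel.
 */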
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&g_accel_io_device);
}

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_io {
	TAILQ_ENTRY(ut_bdev_io)		link;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, ut_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
	struct spdk_thread		*thread;
	TAILQ_ENTRY(ut_bdev_channel)	link;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;
bool g_init_complete_called = false;
bool g_fini_start_called = true;
int g_status = 0;
int g_count = 0;
struct spdk_histogram_data *g_histogram = NULL;
TAILQ_HEAD(, ut_bdev_channel) g_ut_channels;

static int
ut_accel_ch_create_cb(void *io_device, void *ctx)
{
	return 0;
}

static void
ut_accel_ch_destroy_cb(void *io_device, void *ctx)
{
}

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt gets to 0, the submit_request function will return ENOMEM.
	 *  Most tests do not want ENOMEM to occur, so by default set this to a
	 *  big value that won't get hit.  The ENOMEM tests can then override this
	 *  value with something much smaller to induce ENOMEM conditions (see the
	 *  sketch after this function).
	 */
	ch->avail_cnt = 2048;
	ch->thread = spdk_get_thread();

	TAILQ_INSERT_TAIL(&g_ut_channels, ch, link);

	return 0;
}
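
/*
 * Sketch of how an ENOMEM test can use avail_cnt (illustrative only; the
 * locals and the limit of 2 are hypothetical, not taken from a test below):
 *
 *	struct spdk_io_channel *io_ch = spdk_bdev_get_io_channel(g_desc);
 *	struct spdk_bdev_channel *bdev_ch = spdk_io_channel_get_ctx(io_ch);
 *	struct ut_bdev_channel *ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
 *
 *	ut_ch->avail_cnt = 2;
 *	...submit three I/O; the third is completed with
 *	   SPDK_BDEV_IO_STATUS_NOMEM and queued for retry by the bdev layer...
 */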

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	TAILQ_REMOVE(&g_ut_channels, ch, link);
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

static void
stub_reset_channel(void *ctx)
{
	struct ut_bdev_channel *ch = ctx;
	struct ut_bdev_io *bio;

	while (!TAILQ_EMPTY(&ch->outstanding_io)) {
		bio = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bio, link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bio), SPDK_BDEV_IO_STATUS_ABORTED);
		ch->avail_cnt++;
	}
}

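/*
 * I/O submission hook for the stub bdev: RESETs are broadcast to every
 * channel (aborting all outstanding I/O), ABORTs search this channel for the
 * victim I/O, and everything else is either queued on outstanding_io or,
 * once avail_cnt is exhausted, completed with SPDK_BDEV_IO_STATUS_NOMEM.
 */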
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch), *tmp_ch;
	struct spdk_bdev_io *io;
	struct ut_bdev_io *bio;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		TAILQ_FOREACH(tmp_ch, &g_ut_channels, link) {
			if (spdk_get_thread() == tmp_ch->thread) {
				stub_reset_channel(tmp_ch);
			} else {
				spdk_thread_send_msg(tmp_ch->thread, stub_reset_channel, tmp_ch);
			}
		}
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT) {
		TAILQ_FOREACH(bio, &ch->outstanding_io, link) {
			io = spdk_bdev_io_from_ctx(bio);
			if (io == bdev_io->u.abort.bio_to_abort) {
				TAILQ_REMOVE(&ch->outstanding_io, bio, link);
				ch->outstanding_cnt--;
				spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_ABORTED);
				ch->avail_cnt++;

				spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
				return;
			}
		}

		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, (struct ut_bdev_io *)bdev_io->driver_ctx, link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

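/*
 * Complete up to num_to_complete I/O queued on the current thread's channel
 * with SUCCESS status; num_to_complete == 0 means drain everything.  Returns
 * the number of I/O actually completed.
 */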
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_bdev_io *bio;
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		bio = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, bio, link);
		io = spdk_bdev_io_from_ctx(bio);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}
	spdk_put_io_channel(_ch);
	return num_completed;
}

static bool
stub_io_type_supported(void *ctx, enum spdk_bdev_io_type type)
{
	return true;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
	.io_type_supported =	stub_io_type_supported,
};

struct spdk_bdev_module bdev_ut_if;

static int
module_init(void)
{
	spdk_bdev_module_init_done(&bdev_ut_if);
	return 0;
}

static void
module_fini(void)
{
}

static void
init_complete(void)
{
	g_init_complete_called = true;
}

static void
fini_start(void)
{
	g_fini_start_called = true;
}

static int
get_ctx_size(void)
{
	return sizeof(struct ut_bdev_io);
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
	.async_init = true,
	.init_complete = init_complete,
	.fini_start = fini_start,
	.get_ctx_size = get_ctx_size,
};

SPDK_BDEV_MODULE_REGISTER(bdev_ut, &bdev_ut_if)
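
/*
 * get_ctx_size() above makes the bdev layer allocate sizeof(struct ut_bdev_io)
 * of driver context alongside every spdk_bdev_io, which is how the stubs keep
 * per-I/O queue state without allocating.  The round trip between the two
 * views (the pattern used throughout this file) looks like:
 *
 *	struct ut_bdev_io *bio = (struct ut_bdev_io *)bdev_io->driver_ctx;
 *	struct spdk_bdev_io *io = spdk_bdev_io_from_ctx(bio);
 *	assert(io == bdev_io);
 */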

static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
	/* Handle the async bdev unregister. */
	poll_threads();
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

static void
_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
	       void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		if (event_ctx != NULL) {
			*(bool *)event_ctx = true;
		}
		break;
	case SPDK_BDEV_EVENT_RESIZE:
		if (event_ctx != NULL) {
			*(int *)event_ctx += 1;
		}
		break;
	default:
		CU_ASSERT(false);
		break;
	}
}

static void
setup_test(void)
{
	bool done = false;
	int rc;

	TAILQ_INIT(&g_ut_channels);

	allocate_cores(BDEV_UT_NUM_THREADS);
	allocate_threads(BDEV_UT_NUM_THREADS);
	set_thread(0);

	rc = spdk_iobuf_initialize();
	CU_ASSERT(rc == 0);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel), NULL);
	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
				ut_accel_ch_destroy_cb, 0, NULL);
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	set_thread(0);
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	spdk_io_device_unregister(&g_accel_io_device, NULL);
	spdk_iobuf_finish(finish_cb, NULL);
	poll_threads();
	memset(&g_bdev, 0, sizeof(g_bdev));
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
	free_cores();
	CU_ASSERT(TAILQ_EMPTY(&g_ut_channels));
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, internal.link) {
		cnt++;
	}

	return cnt;
}

static void
basic(void)
{
	g_init_complete_called = false;
	setup_test();
	CU_ASSERT(g_init_complete_called == true);

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	g_fini_start_called = false;
	teardown_test();
	CU_ASSERT(g_fini_start_called == true);
}

static void
_bdev_unregistered(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

static void
unregister_and_close(void)
{
	bool done, remove_notify;
	struct spdk_bdev_desc *desc = NULL;

	setup_test();
	set_thread(0);

	/* setup_test() automatically opens the bdev,
	 * but this test needs to do that in a different
	 * way. */
	spdk_bdev_close(g_desc);
	poll_threads();

	/* Try hot-removing a bdev with descriptors that don't provide
	 * any context to the notification callback. */
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
	SPDK_CU_ASSERT_FATAL(desc != NULL);

	/* There is an open descriptor on the device. Unregister it,
	 * which can't proceed until the descriptor is closed. */
	done = false;
	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);

	/* Poll the threads to allow all events to be processed */
	poll_threads();

	/* Make sure the bdev was not unregistered. We still have a
	 * descriptor open */
	CU_ASSERT(done == false);

	spdk_bdev_close(desc);
	poll_threads();
	desc = NULL;

	/* The unregister should have completed */
	CU_ASSERT(done == true);


	/* Register the bdev again */
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);

	remove_notify = false;
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	CU_ASSERT(remove_notify == false);

	/* There is an open descriptor on the device. Unregister it,
	 * which can't proceed until the descriptor is closed. */
	done = false;
	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
	/* No polling has occurred, so neither of these should execute */
	CU_ASSERT(remove_notify == false);
	CU_ASSERT(done == false);

	/* Prior to the unregister completing, close the descriptor */
	spdk_bdev_close(desc);

	/* Poll the threads to allow all events to be processed */
	poll_threads();

	/* Remove notify should not have been called because the
	 * descriptor is already closed. */
	CU_ASSERT(remove_notify == false);

	/* The unregister should have completed */
	CU_ASSERT(done == true);

	/* Restore the original g_bdev so that we can use teardown_test(). */
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
	teardown_test();
}

static void
unregister_and_close_different_threads(void)
{
	bool done;
	struct spdk_bdev_desc *desc = NULL;

	setup_test();
	set_thread(0);

	/* setup_test() automatically opens the bdev,
	 * but this test needs to do that in a different
	 * way. */
	spdk_bdev_close(g_desc);
	poll_threads();

	set_thread(1);
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &desc);
	SPDK_CU_ASSERT_FATAL(desc != NULL);
	done = false;

	set_thread(0);
	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);

	/* Poll the threads to allow all events to be processed */
	poll_threads();

	/* Make sure the bdev was not unregistered. We still have a
	 * descriptor open */
	CU_ASSERT(done == false);

	/* Close the descriptor on thread 1.  Poll the thread and confirm the
	 * unregister did not complete, since it was unregistered on thread 0.
	 */
	set_thread(1);
	spdk_bdev_close(desc);
	poll_thread(1);
	CU_ASSERT(done == false);

	/* Now poll thread 0 and confirm the unregister completed. */
	set_thread(0);
	poll_thread(0);
	CU_ASSERT(done == true);

	/* Restore the original g_bdev so that we can use teardown_test(). */
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;
	uint32_t num_completed;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();

	/* Complete the reset. */
	num_completed = stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(num_completed == 1);
	poll_threads();
	CU_ASSERT(done == true);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

static void io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg);

static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
				 status2 = SPDK_BDEV_IO_STATUS_PENDING;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  Nothing happens yet, because the pending second
	 *  reset still holds a reference to ch1.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that both resets completed with SUCCESS
	 *  status and that bdev->internal.reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);

	/*
	 * Teardown should succeed.
	 */
	teardown_test();
}

static void
aborted_reset_no_outstanding_io(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev[2];
	enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
				 status2 = SPDK_BDEV_IO_STATUS_PENDING;

	setup_test();

	/*
	 * This time we test the reset without any outstanding IO
	 * present on the bdev channel, so both resets should finish
	 * immediately.
	 */

	set_thread(0);
	/* Set reset_io_drain_timeout so the reset goes through the
	 * outstanding-I/O drain path instead of being submitted to the
	 * module immediately. */
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	bdev[0] = bdev_ch[0]->bdev;
	bdev[0]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	spdk_put_io_channel(io_ch[0]);

	set_thread(1);
	/* Set reset_io_drain_timeout so the reset goes through the
	 * outstanding-I/O drain path instead of being submitted to the
	 * module immediately. */
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	bdev[1] = bdev_ch[1]->bdev;
	bdev[1]->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
	spdk_put_io_channel(io_ch[1]);

	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();

	teardown_test();
}


static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = bdev_io->internal.status;
	spdk_bdev_free_io(bdev_io);
}

static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

774b4ffeaecSJim Harris 	/*
7753ef479abSBen Walker 	 * Now submit a reset, and leave it pending while we submit I/O on two different
776b4ffeaecSJim Harris 	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
777b4ffeaecSJim Harris 	 *  progress.
778b4ffeaecSJim Harris 	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress, so these read I/O should complete with ABORTED
	 *  status.  Note that we need to poll_threads() since I/O completed inline
	 *  have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

static uint32_t
count_queued_resets(void *io_target)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct ut_bdev_io *bio;
	struct spdk_bdev_io *io;
	uint32_t submitted_resets = 0;

	TAILQ_FOREACH(bio, &ch->outstanding_io, link) {
		io = spdk_bdev_io_from_ctx(bio);
		if (io->type == SPDK_BDEV_IO_TYPE_RESET) {
			submitted_resets++;
		}
	}

	spdk_put_io_channel(_ch);

	return submitted_resets;
}

static void
reset_completions(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status_reset;
	int rc, iter;

	setup_test();

	/* This test covers four test cases:
	 * 1) reset_io_drain_timeout of a bdev is greater than 0
	 * 2) No outstanding IO are present on any bdev channel
	 * 3) Outstanding IO finish during bdev reset
	 * 4) Outstanding IO do not finish before reset is done waiting
	 *    for them.
	 *
	 * The above conditions mainly affect the timing of bdev reset completion
	 * and whether a reset should be skipped via spdk_bdev_io_complete()
	 * or sent down to the underlying bdev module via bdev_io_submit_reset(). */

	/* Test preparation */
	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	CU_ASSERT(bdev_ch->flags == 0);


	/* Test case 1) reset_io_drain_timeout set to 0. Reset should be sent down immediately. */
	bdev = &g_bdev.bdev;
	bdev->reset_io_drain_timeout = 0;

	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);

	/* Call reset completion inside bdev module. */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);


	/* Test case 2) no outstanding IO are present. Reset should perform one iteration over
	 * channels and then be skipped. */
	bdev->reset_io_drain_timeout = SPDK_BDEV_RESET_IO_DRAIN_RECOMMENDED_VALUE;
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;

	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	/* Reset was never submitted to the bdev module. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);


	/* Test case 3) outstanding IO finish during the bdev reset procedure. Reset should
	 * initiate a wait poller that checks for IO completions every second, until
	 * reset_io_drain_timeout is reached, but finish earlier than this threshold. */
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	/* The reset just started and should not have been submitted yet. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);

	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	/* Let the poller wait for about half the time, then complete outstanding IO. */
	for (iter = 0; iter < 2; iter++) {
		/* Reset is still processing and not submitted at this point. */
		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
		spdk_delay_us(1000 * 1000);
		poll_threads();
		poll_threads();
	}
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
	poll_threads();
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
	/* Sending reset to the bdev module has been skipped. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);


	/* Test case 4) outstanding IO are still present after reset_io_drain_timeout
	 * seconds have passed. */
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	rc = spdk_bdev_reset(g_desc, io_ch, io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	/* The reset just started and should not have been submitted yet. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);

	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	/* Let the poller wait for reset_io_drain_timeout seconds. */
	for (iter = 0; iter < bdev->reset_io_drain_timeout; iter++) {
		CU_ASSERT(count_queued_resets(g_bdev.io_target) == 0);
		spdk_delay_us(BDEV_RESET_CHECK_OUTSTANDING_IO_PERIOD);
		poll_threads();
		poll_threads();
	}

	/* After timing out, the reset should have been sent to the module. */
	CU_ASSERT(count_queued_resets(g_bdev.io_target) == 1);
	/* Complete reset submitted to the module and the read IO. */
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);


	/* Destroy the channel and end the test. */
	spdk_put_io_channel(io_ch);
	poll_threads();

	teardown_test();
}


static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status, abort_status;
	int rc;

	setup_test();

	/* Enable QoS */
	bdev = &g_bdev.bdev;
	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
	/*
	 * Enable read/write IOPS, read-only byte per second and
	 * read/write byte per second rate limits.
	 * In this case, all rate limits will take equal effect.
	 */
	/* 2000 read/write I/O per second, or 2 per millisecond */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
	/* 8K read/write bytes per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
	/* 8K read-only bytes per millisecond with 4K block size */
	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 8192000;
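	/*
	 * Worked out: with a 4096-byte blocklen, 2000 I/O per second is exactly
	 * 2000 * 4096 = 8192000 bytes per second, so all three limits above
	 * throttle at the same rate.
	 */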
1020d859e3ccSBen Walker 
1021a2142f3aSGangCao 	g_get_io_channel = true;
1022a2142f3aSGangCao 
1023a2142f3aSGangCao 	set_thread(0);
1024d859e3ccSBen Walker 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
10252e1dbc45SBen Walker 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
10262e1dbc45SBen Walker 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1027a2142f3aSGangCao 
1028a2142f3aSGangCao 	set_thread(1);
1029a2142f3aSGangCao 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
10302e1dbc45SBen Walker 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
10312e1dbc45SBen Walker 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1032a2142f3aSGangCao 
1033a2142f3aSGangCao 	/*
1034d859e3ccSBen Walker 	 * Send an I/O on thread 0, which is where the QoS thread is running.
1035a2142f3aSGangCao 	 */
1036a2142f3aSGangCao 	set_thread(0);
1037a2142f3aSGangCao 	status = SPDK_BDEV_IO_STATUS_PENDING;
1038a2142f3aSGangCao 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1039a2142f3aSGangCao 	CU_ASSERT(rc == 0);
1040a2142f3aSGangCao 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1041d859e3ccSBen Walker 	poll_threads();
1042d859e3ccSBen Walker 	stub_complete_io(g_bdev.io_target, 0);
1043a2142f3aSGangCao 	poll_threads();
1044a2142f3aSGangCao 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1045a2142f3aSGangCao 
1046d859e3ccSBen Walker 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1047d859e3ccSBen Walker 	status = SPDK_BDEV_IO_STATUS_PENDING;
1048a2142f3aSGangCao 	set_thread(1);
1049d859e3ccSBen Walker 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1050a2142f3aSGangCao 	CU_ASSERT(rc == 0);
1051d859e3ccSBen Walker 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1052d859e3ccSBen Walker 	poll_threads();
10531450c547SGangCao 	/* Complete I/O on thread 0. This should not complete the I/O we submitted. */
10541450c547SGangCao 	set_thread(0);
1055d859e3ccSBen Walker 	stub_complete_io(g_bdev.io_target, 0);
1056d859e3ccSBen Walker 	poll_threads();
1057d859e3ccSBen Walker 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
10581450c547SGangCao 	/* Now complete I/O on original thread 1. */
10591450c547SGangCao 	set_thread(1);
1060d859e3ccSBen Walker 	poll_threads();
1061d859e3ccSBen Walker 	stub_complete_io(g_bdev.io_target, 0);
1062d859e3ccSBen Walker 	poll_threads();
1063d859e3ccSBen Walker 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
1064a2142f3aSGangCao 
1065f048f3d3SShuhei Matsumoto 	/* Reset rate limit for the next test cases. */
1066f048f3d3SShuhei Matsumoto 	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
1067f048f3d3SShuhei Matsumoto 	poll_threads();
1068f048f3d3SShuhei Matsumoto 
1069f048f3d3SShuhei Matsumoto 	/*
1070f048f3d3SShuhei Matsumoto 	 * Test abort request when QoS is enabled.
1071f048f3d3SShuhei Matsumoto 	 */
1072f048f3d3SShuhei Matsumoto 
1073f048f3d3SShuhei Matsumoto 	/* Send an I/O on thread 0, which is where the QoS thread is running. */
1074f048f3d3SShuhei Matsumoto 	set_thread(0);
1075f048f3d3SShuhei Matsumoto 	status = SPDK_BDEV_IO_STATUS_PENDING;
1076f048f3d3SShuhei Matsumoto 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
1077f048f3d3SShuhei Matsumoto 	CU_ASSERT(rc == 0);
1078f048f3d3SShuhei Matsumoto 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1079f048f3d3SShuhei Matsumoto 	/* Send an abort to the I/O on the same thread. */
1080f048f3d3SShuhei Matsumoto 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1081f048f3d3SShuhei Matsumoto 	rc = spdk_bdev_abort(g_desc, io_ch[0], &status, io_during_io_done, &abort_status);
1082f048f3d3SShuhei Matsumoto 	CU_ASSERT(rc == 0);
1083f048f3d3SShuhei Matsumoto 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1084f048f3d3SShuhei Matsumoto 	poll_threads();
1085f048f3d3SShuhei Matsumoto 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1086f048f3d3SShuhei Matsumoto 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1087f048f3d3SShuhei Matsumoto 
1088f048f3d3SShuhei Matsumoto 	/* Send an I/O on thread 1. The QoS thread is not running here. */
1089f048f3d3SShuhei Matsumoto 	status = SPDK_BDEV_IO_STATUS_PENDING;
1090f048f3d3SShuhei Matsumoto 	set_thread(1);
1091f048f3d3SShuhei Matsumoto 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
1092f048f3d3SShuhei Matsumoto 	CU_ASSERT(rc == 0);
1093f048f3d3SShuhei Matsumoto 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
1094f048f3d3SShuhei Matsumoto 	poll_threads();
1095f048f3d3SShuhei Matsumoto 	/* Send an abort to the I/O on the same thread. */
1096f048f3d3SShuhei Matsumoto 	abort_status = SPDK_BDEV_IO_STATUS_PENDING;
1097f048f3d3SShuhei Matsumoto 	rc = spdk_bdev_abort(g_desc, io_ch[1], &status, io_during_io_done, &abort_status);
1098f048f3d3SShuhei Matsumoto 	CU_ASSERT(rc == 0);
1099f048f3d3SShuhei Matsumoto 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_PENDING);
1100f048f3d3SShuhei Matsumoto 	poll_threads();
1101f048f3d3SShuhei Matsumoto 	/* On thread 1, the I/O should now have been aborted and the abort completed with success. */
1102f048f3d3SShuhei Matsumoto 	CU_ASSERT(abort_status == SPDK_BDEV_IO_STATUS_SUCCESS);
1103f048f3d3SShuhei Matsumoto 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_ABORTED);
1104f048f3d3SShuhei Matsumoto 
1105f048f3d3SShuhei Matsumoto 	set_thread(0);
1106f048f3d3SShuhei Matsumoto 
1107120825c9SGangCao 	/*
1108120825c9SGangCao 	 * Close the descriptor only, which should stop the QoS channel since
1109120825c9SGangCao 	 * the last descriptor was removed.
1110120825c9SGangCao 	 */
1111120825c9SGangCao 	spdk_bdev_close(g_desc);
1112120825c9SGangCao 	poll_threads();
1113120825c9SGangCao 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1114120825c9SGangCao 
1115120825c9SGangCao 	/*
1116120825c9SGangCao 	 * Open the bdev again, which should set up the QoS channel again
1117120825c9SGangCao 	 * since the I/O channels are still valid.
1118120825c9SGangCao 	 */
11196a684a34SShuhei Matsumoto 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1120120825c9SGangCao 	poll_threads();
1121120825c9SGangCao 	CU_ASSERT(bdev->internal.qos->ch != NULL);
1122120825c9SGangCao 
1123d859e3ccSBen Walker 	/* Tear down the channels */
1124d859e3ccSBen Walker 	set_thread(0);
1125d859e3ccSBen Walker 	spdk_put_io_channel(io_ch[0]);
1126d859e3ccSBen Walker 	set_thread(1);
1127d859e3ccSBen Walker 	spdk_put_io_channel(io_ch[1]);
1128d859e3ccSBen Walker 	poll_threads();
1129d859e3ccSBen Walker 	set_thread(0);
1130d859e3ccSBen Walker 
1131d859e3ccSBen Walker 	/* Close the descriptor, which should stop the qos channel */
1132d859e3ccSBen Walker 	spdk_bdev_close(g_desc);
1133aac1f5f9SBen Walker 	poll_threads();
113486947c89SSeth Howell 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1135d859e3ccSBen Walker 
1136120825c9SGangCao 	/* Open the bdev again; no QoS channel is set up since there are no valid I/O channels. */
11376a684a34SShuhei Matsumoto 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
1138120825c9SGangCao 	poll_threads();
1139120825c9SGangCao 	CU_ASSERT(bdev->internal.qos->ch == NULL);
1140d859e3ccSBen Walker 
1141d859e3ccSBen Walker 	/* Create the channels in reverse order. */
1142a2142f3aSGangCao 	set_thread(1);
1143a2142f3aSGangCao 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
11442e1dbc45SBen Walker 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
11452e1dbc45SBen Walker 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1146a2142f3aSGangCao 
1147a2142f3aSGangCao 	set_thread(0);
1148d859e3ccSBen Walker 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
11492e1dbc45SBen Walker 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
11502e1dbc45SBen Walker 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1151a2142f3aSGangCao 
1152aac1f5f9SBen Walker 	/* Confirm that the qos thread is now thread 1 */
115386947c89SSeth Howell 	CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
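	/*
	 * The QoS channel binds to whichever I/O channel is created first, so
	 * creating the channels in reverse order moves the QoS thread to
	 * thread 1.
	 */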
1154a2142f3aSGangCao 
1155d859e3ccSBen Walker 	/* Tear down the channels */
1156a2142f3aSGangCao 	set_thread(0);
1157a2142f3aSGangCao 	spdk_put_io_channel(io_ch[0]);
1158a2142f3aSGangCao 	set_thread(1);
1159a2142f3aSGangCao 	spdk_put_io_channel(io_ch[1]);
1160a2142f3aSGangCao 	poll_threads();
1161a2142f3aSGangCao 
1162d859e3ccSBen Walker 	set_thread(0);
1163d859e3ccSBen Walker 
1164a2142f3aSGangCao 	teardown_test();
1165a2142f3aSGangCao }
1166a2142f3aSGangCao 
1167a2142f3aSGangCao static void
1168a2142f3aSGangCao io_during_qos_queue(void)
1169a2142f3aSGangCao {
1170d859e3ccSBen Walker 	struct spdk_io_channel *io_ch[2];
11712e1dbc45SBen Walker 	struct spdk_bdev_channel *bdev_ch[2];
1172a2142f3aSGangCao 	struct spdk_bdev *bdev;
1173316cb3b1SGangCao 	enum spdk_bdev_io_status status0, status1, status2;
1174a2142f3aSGangCao 	int rc;
1175a2142f3aSGangCao 
1176a2142f3aSGangCao 	setup_test();
11773c981508SBen Walker 	MOCK_SET(spdk_get_ticks, 0);
1178a2142f3aSGangCao 
1179d859e3ccSBen Walker 	/* Enable QoS */
1180d859e3ccSBen Walker 	bdev = &g_bdev.bdev;
118186947c89SSeth Howell 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
118286947c89SSeth Howell 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1183055de83aSAnton Nayshtut 
11847191c4bdSGangCao 	/*
1185316cb3b1SGangCao 	 * Enable read/write IOPS, read only byte per sec, write only
1186316cb3b1SGangCao 	 * byte per sec and read/write byte per sec rate limits.
1187316cb3b1SGangCao 	 * In this case, both the read only and write only byte per sec
1188316cb3b1SGangCao 	 * rate limits will take effect.
11897191c4bdSGangCao 	 */
1190316cb3b1SGangCao 	/* 4000 read/write I/O per second, or 4 per millisecond */
1191316cb3b1SGangCao 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 4000;
11927d030ef7SGangCao 	/* 8K byte per millisecond with 4K block size */
11937d030ef7SGangCao 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
1194316cb3b1SGangCao 	/* 4K byte per millisecond with 4K block size */
1195316cb3b1SGangCao 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT].limit = 4096000;
1196316cb3b1SGangCao 	/* 4K byte per millisecond with 4K block size */
1197316cb3b1SGangCao 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 4096000;
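	/*
	 * Per millisecond, the limits above allow 4 I/Os (IOPS), 2 4K I/Os
	 * worth of read/write bytes, 1 4K read and 1 4K write.  The read only
	 * and write only byte limits are therefore the strictest: at most
	 * 1 read and 1 write per millisecond.
	 */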
1198d859e3ccSBen Walker 
1199d859e3ccSBen Walker 	g_get_io_channel = true;
1200d859e3ccSBen Walker 
1201d859e3ccSBen Walker 	/* Create channels */
1202a2142f3aSGangCao 	set_thread(0);
1203a2142f3aSGangCao 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
12042e1dbc45SBen Walker 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
12052e1dbc45SBen Walker 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1206a2142f3aSGangCao 
1207a2142f3aSGangCao 	set_thread(1);
1208a2142f3aSGangCao 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
12092e1dbc45SBen Walker 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
12102e1dbc45SBen Walker 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1211d859e3ccSBen Walker 
1212316cb3b1SGangCao 	/* Send two read I/Os */
1213a2142f3aSGangCao 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1214a2142f3aSGangCao 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1215a2142f3aSGangCao 	CU_ASSERT(rc == 0);
1216a2142f3aSGangCao 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1217a2142f3aSGangCao 	set_thread(0);
1218a2142f3aSGangCao 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1219a2142f3aSGangCao 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1220a2142f3aSGangCao 	CU_ASSERT(rc == 0);
1221a2142f3aSGangCao 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1222316cb3b1SGangCao 	/* Send one write I/O */
1223316cb3b1SGangCao 	status2 = SPDK_BDEV_IO_STATUS_PENDING;
1224316cb3b1SGangCao 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status2);
1225316cb3b1SGangCao 	CU_ASSERT(rc == 0);
1226316cb3b1SGangCao 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_PENDING);
1227a2142f3aSGangCao 
1228d859e3ccSBen Walker 	/* Complete any I/O that arrived at the disk */
1229a2142f3aSGangCao 	poll_threads();
1230d859e3ccSBen Walker 	set_thread(1);
1231a2142f3aSGangCao 	stub_complete_io(g_bdev.io_target, 0);
1232d859e3ccSBen Walker 	set_thread(0);
1233d859e3ccSBen Walker 	stub_complete_io(g_bdev.io_target, 0);
1234a2142f3aSGangCao 	poll_threads();
1235a2142f3aSGangCao 
1236316cb3b1SGangCao 	/* Only one of the two read I/Os should complete. (logical XOR) */
1237d859e3ccSBen Walker 	if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
1238d859e3ccSBen Walker 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1239d859e3ccSBen Walker 	} else {
1240d859e3ccSBen Walker 		CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1241d859e3ccSBen Walker 	}
1242316cb3b1SGangCao 	/* The write I/O should complete. */
1243316cb3b1SGangCao 	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_SUCCESS);
1244d859e3ccSBen Walker 
1245d859e3ccSBen Walker 	/* Advance in time by a millisecond */
12463c981508SBen Walker 	spdk_delay_us(1000);
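	/* The new timeslice replenishes the quotas, letting the queued read through. */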
1247d859e3ccSBen Walker 
1248d859e3ccSBen Walker 	/* Complete more I/O */
1249d859e3ccSBen Walker 	poll_threads();
1250d859e3ccSBen Walker 	set_thread(1);
1251d859e3ccSBen Walker 	stub_complete_io(g_bdev.io_target, 0);
1252d859e3ccSBen Walker 	set_thread(0);
1253d859e3ccSBen Walker 	stub_complete_io(g_bdev.io_target, 0);
1254d859e3ccSBen Walker 	poll_threads();
1255d859e3ccSBen Walker 
1256316cb3b1SGangCao 	/* Now the second read I/O should be done */
1257a2142f3aSGangCao 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
1258a2142f3aSGangCao 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
1259a2142f3aSGangCao 
1260d859e3ccSBen Walker 	/* Tear down the channels */
1261d859e3ccSBen Walker 	set_thread(1);
1262d859e3ccSBen Walker 	spdk_put_io_channel(io_ch[1]);
1263d859e3ccSBen Walker 	set_thread(0);
1264d859e3ccSBen Walker 	spdk_put_io_channel(io_ch[0]);
1265d859e3ccSBen Walker 	poll_threads();
1266d859e3ccSBen Walker 
1267a2142f3aSGangCao 	teardown_test();
1268a2142f3aSGangCao }
1269a2142f3aSGangCao 
1270a2142f3aSGangCao static void
1271310f324eSGangCao io_during_qos_reset(void)
1272310f324eSGangCao {
1273d859e3ccSBen Walker 	struct spdk_io_channel *io_ch[2];
12742e1dbc45SBen Walker 	struct spdk_bdev_channel *bdev_ch[2];
1275310f324eSGangCao 	struct spdk_bdev *bdev;
1276d859e3ccSBen Walker 	enum spdk_bdev_io_status status0, status1, reset_status;
1277310f324eSGangCao 	int rc;
1278310f324eSGangCao 
1279310f324eSGangCao 	setup_test();
12803c981508SBen Walker 	MOCK_SET(spdk_get_ticks, 0);
1281310f324eSGangCao 
1282d859e3ccSBen Walker 	/* Enable QoS */
1283d859e3ccSBen Walker 	bdev = &g_bdev.bdev;
128486947c89SSeth Howell 	bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
128586947c89SSeth Howell 	SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
1286055de83aSAnton Nayshtut 
12877191c4bdSGangCao 	/*
1288316cb3b1SGangCao 	 * Enable read/write IOPS, write only byte per sec and
1289316cb3b1SGangCao 	 * read/write byte per second rate limits.
1290316cb3b1SGangCao 	 * In this case, the read/write byte per second rate limit will
1291316cb3b1SGangCao 	 * take effect first.
12927191c4bdSGangCao 	 */
1293316cb3b1SGangCao 	/* 2000 read/write I/O per second, or 2 per millisecond */
12947d030ef7SGangCao 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
12957d030ef7SGangCao 	/* 4K byte per millisecond with 4K block size */
12967d030ef7SGangCao 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
1297316cb3b1SGangCao 	/* 8K byte per millisecond with 4K block size */
1298316cb3b1SGangCao 	bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT].limit = 8192000;
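	/*
	 * Per millisecond, the IOPS limit allows 2 I/Os and the write only
	 * byte limit allows 2 4K writes, but the read/write byte limit allows
	 * only 1 4K I/O, so it is the limit that throttles first.
	 */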
1299d859e3ccSBen Walker 
1300d859e3ccSBen Walker 	g_get_io_channel = true;
1301d859e3ccSBen Walker 
1302d859e3ccSBen Walker 	/* Create channels */
1303310f324eSGangCao 	set_thread(0);
1304310f324eSGangCao 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
13052e1dbc45SBen Walker 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
13062e1dbc45SBen Walker 	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
1307310f324eSGangCao 
1308310f324eSGangCao 	set_thread(1);
1309310f324eSGangCao 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
13102e1dbc45SBen Walker 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
13112e1dbc45SBen Walker 	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
1312d859e3ccSBen Walker 
1313d859e3ccSBen Walker 	/* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
1314310f324eSGangCao 	status1 = SPDK_BDEV_IO_STATUS_PENDING;
1315316cb3b1SGangCao 	rc = spdk_bdev_write_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
1316310f324eSGangCao 	CU_ASSERT(rc == 0);
1317310f324eSGangCao 	set_thread(0);
1318310f324eSGangCao 	status0 = SPDK_BDEV_IO_STATUS_PENDING;
1319316cb3b1SGangCao 	rc = spdk_bdev_write_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
1320310f324eSGangCao 	CU_ASSERT(rc == 0);
1321310f324eSGangCao 
1322d859e3ccSBen Walker 	poll_threads();
1323d859e3ccSBen Walker 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
1324d859e3ccSBen Walker 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
1325d859e3ccSBen Walker 
1326d859e3ccSBen Walker 	/* Reset the bdev. */
1327d859e3ccSBen Walker 	reset_status = SPDK_BDEV_IO_STATUS_PENDING;
1328d859e3ccSBen Walker 	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
1329310f324eSGangCao 	CU_ASSERT(rc == 0);
1330310f324eSGangCao 
1331d859e3ccSBen Walker 	/* Complete any I/O that arrived at the disk */
1332310f324eSGangCao 	poll_threads();
1333d859e3ccSBen Walker 	set_thread(1);
1334d859e3ccSBen Walker 	stub_complete_io(g_bdev.io_target, 0);
1335d859e3ccSBen Walker 	set_thread(0);
1336d859e3ccSBen Walker 	stub_complete_io(g_bdev.io_target, 0);
1337d859e3ccSBen Walker 	poll_threads();
1338d859e3ccSBen Walker 
1339d859e3ccSBen Walker 	CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
13408d4b319cSShuhei Matsumoto 	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_ABORTED);
13418d4b319cSShuhei Matsumoto 	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_ABORTED);
1342310f324eSGangCao 
1343d859e3ccSBen Walker 	/* Tear down the channels */
1344310f324eSGangCao 	set_thread(1);
1345310f324eSGangCao 	spdk_put_io_channel(io_ch[1]);
1346d859e3ccSBen Walker 	set_thread(0);
1347d859e3ccSBen Walker 	spdk_put_io_channel(io_ch[0]);
1348310f324eSGangCao 	poll_threads();
1349310f324eSGangCao 
1350310f324eSGangCao 	teardown_test();
1351310f324eSGangCao }
1352310f324eSGangCao 
1353310f324eSGangCao static void
135494bc8cfdSJim Harris enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
135594bc8cfdSJim Harris {
135694bc8cfdSJim Harris 	enum spdk_bdev_io_status *status = cb_arg;
135794bc8cfdSJim Harris 
135894bc8cfdSJim Harris 	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
135994bc8cfdSJim Harris 	spdk_bdev_free_io(bdev_io);
136094bc8cfdSJim Harris }
136194bc8cfdSJim Harris 
136294bc8cfdSJim Harris static void
136394bc8cfdSJim Harris enomem(void)
136494bc8cfdSJim Harris {
136594bc8cfdSJim Harris 	struct spdk_io_channel *io_ch;
136694bc8cfdSJim Harris 	struct spdk_bdev_channel *bdev_ch;
1367cc8bb51bSDariusz Stojaczyk 	struct spdk_bdev_shared_resource *shared_resource;
136894bc8cfdSJim Harris 	struct ut_bdev_channel *ut_ch;
136994bc8cfdSJim Harris 	const uint32_t IO_ARRAY_SIZE = 64;
137094bc8cfdSJim Harris 	const uint32_t AVAIL = 20;
137194bc8cfdSJim Harris 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
137294bc8cfdSJim Harris 	uint32_t nomem_cnt, i;
137394bc8cfdSJim Harris 	struct spdk_bdev_io *first_io;
137494bc8cfdSJim Harris 	int rc;
137594bc8cfdSJim Harris 
137694bc8cfdSJim Harris 	setup_test();
137794bc8cfdSJim Harris 
137894bc8cfdSJim Harris 	set_thread(0);
137994bc8cfdSJim Harris 	io_ch = spdk_bdev_get_io_channel(g_desc);
138094bc8cfdSJim Harris 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1381cc8bb51bSDariusz Stojaczyk 	shared_resource = bdev_ch->shared_resource;
138294bc8cfdSJim Harris 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
138394bc8cfdSJim Harris 	ut_ch->avail_cnt = AVAIL;
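	/*
	 * The stub channel accepts at most avail_cnt concurrent I/Os; beyond
	 * that it fails new I/O with ENOMEM, which makes the bdev layer queue
	 * the overflow on the shared_resource's nomem_io list.
	 */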
138494bc8cfdSJim Harris 
138594bc8cfdSJim Harris 	/* First submit a number of IOs equal to what the channel can support. */
138694bc8cfdSJim Harris 	for (i = 0; i < AVAIL; i++) {
138794bc8cfdSJim Harris 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
138894bc8cfdSJim Harris 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
138994bc8cfdSJim Harris 		CU_ASSERT(rc == 0);
139094bc8cfdSJim Harris 	}
1391cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
139294bc8cfdSJim Harris 
139394bc8cfdSJim Harris 	/*
139494bc8cfdSJim Harris 	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
139594bc8cfdSJim Harris 	 *  the nomem_io list.
139694bc8cfdSJim Harris 	 */
139794bc8cfdSJim Harris 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
139894bc8cfdSJim Harris 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
139994bc8cfdSJim Harris 	CU_ASSERT(rc == 0);
1400cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1401cc8bb51bSDariusz Stojaczyk 	first_io = TAILQ_FIRST(&shared_resource->nomem_io);
140294bc8cfdSJim Harris 
140394bc8cfdSJim Harris 	/*
140494bc8cfdSJim Harris 	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
140594bc8cfdSJim Harris 	 *  the first_io above.
140694bc8cfdSJim Harris 	 */
140794bc8cfdSJim Harris 	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
140894bc8cfdSJim Harris 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
140994bc8cfdSJim Harris 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
141094bc8cfdSJim Harris 		CU_ASSERT(rc == 0);
141194bc8cfdSJim Harris 	}
141294bc8cfdSJim Harris 
141394bc8cfdSJim Harris 	/* Assert that first_io is still at the head of the list. */
1414cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
1415cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
1416cc8bb51bSDariusz Stojaczyk 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
1417cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
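	/*
	 * Queued nomem_io is not retried on every completion.  Retries only
	 * start once io_outstanding drops to nomem_threshold, i.e. after
	 * NOMEM_THRESHOLD_COUNT completions, so the target is not hammered
	 * while it is still saturated.
	 */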
141894bc8cfdSJim Harris 
141994bc8cfdSJim Harris 	/*
142094bc8cfdSJim Harris 	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
142194bc8cfdSJim Harris 	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
142294bc8cfdSJim Harris 	 *  list.
142394bc8cfdSJim Harris 	 */
14245e799ff4SDariusz Stojaczyk 	stub_complete_io(g_bdev.io_target, 1);
1425cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
142694bc8cfdSJim Harris 
142794bc8cfdSJim Harris 	/*
1428c9c7c281SJosh Soref 	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
142994bc8cfdSJim Harris 	 *  and we should see I/O get resubmitted to the test bdev module.
143094bc8cfdSJim Harris 	 */
14315e799ff4SDariusz Stojaczyk 	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
1432cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
1433cc8bb51bSDariusz Stojaczyk 	nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
143494bc8cfdSJim Harris 
143594bc8cfdSJim Harris 	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
14365e799ff4SDariusz Stojaczyk 	stub_complete_io(g_bdev.io_target, 1);
1437cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
143894bc8cfdSJim Harris 
143994bc8cfdSJim Harris 	/*
144094bc8cfdSJim Harris 	 * Send a reset and confirm that all I/O are completed, including the ones that
144194bc8cfdSJim Harris 	 *  were queued on the nomem_io list.
144294bc8cfdSJim Harris 	 */
144394bc8cfdSJim Harris 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
144494bc8cfdSJim Harris 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
144594bc8cfdSJim Harris 	poll_threads();
144694bc8cfdSJim Harris 	CU_ASSERT(rc == 0);
144794bc8cfdSJim Harris 	/* This will complete the reset. */
14485e799ff4SDariusz Stojaczyk 	stub_complete_io(g_bdev.io_target, 0);
144994bc8cfdSJim Harris 
1450cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
1451cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(shared_resource->io_outstanding == 0);
145294bc8cfdSJim Harris 
145394bc8cfdSJim Harris 	spdk_put_io_channel(io_ch);
145494bc8cfdSJim Harris 	poll_threads();
145594bc8cfdSJim Harris 	teardown_test();
145694bc8cfdSJim Harris }
145794bc8cfdSJim Harris 
1458583a24a4SDariusz Stojaczyk static void
1459583a24a4SDariusz Stojaczyk enomem_multi_bdev(void)
1460583a24a4SDariusz Stojaczyk {
1461583a24a4SDariusz Stojaczyk 	struct spdk_io_channel *io_ch;
1462583a24a4SDariusz Stojaczyk 	struct spdk_bdev_channel *bdev_ch;
1463cc8bb51bSDariusz Stojaczyk 	struct spdk_bdev_shared_resource *shared_resource;
1464583a24a4SDariusz Stojaczyk 	struct ut_bdev_channel *ut_ch;
1465583a24a4SDariusz Stojaczyk 	const uint32_t IO_ARRAY_SIZE = 64;
1466583a24a4SDariusz Stojaczyk 	const uint32_t AVAIL = 20;
1467583a24a4SDariusz Stojaczyk 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1468583a24a4SDariusz Stojaczyk 	uint32_t i;
1469583a24a4SDariusz Stojaczyk 	struct ut_bdev *second_bdev;
14700a4a217cSJim Harris 	struct spdk_bdev_desc *second_desc = NULL;
1471583a24a4SDariusz Stojaczyk 	struct spdk_bdev_channel *second_bdev_ch;
1472583a24a4SDariusz Stojaczyk 	struct spdk_io_channel *second_ch;
1473583a24a4SDariusz Stojaczyk 	int rc;
1474583a24a4SDariusz Stojaczyk 
1475583a24a4SDariusz Stojaczyk 	setup_test();
1476583a24a4SDariusz Stojaczyk 
1477583a24a4SDariusz Stojaczyk 	/* Register a second bdev with the same io_target */
1478583a24a4SDariusz Stojaczyk 	second_bdev = calloc(1, sizeof(*second_bdev));
1479583a24a4SDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1480583a24a4SDariusz Stojaczyk 	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
14816a684a34SShuhei Matsumoto 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
14820a4a217cSJim Harris 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1483583a24a4SDariusz Stojaczyk 
1484583a24a4SDariusz Stojaczyk 	set_thread(0);
1485583a24a4SDariusz Stojaczyk 	io_ch = spdk_bdev_get_io_channel(g_desc);
1486583a24a4SDariusz Stojaczyk 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1487cc8bb51bSDariusz Stojaczyk 	shared_resource = bdev_ch->shared_resource;
1488583a24a4SDariusz Stojaczyk 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1489583a24a4SDariusz Stojaczyk 	ut_ch->avail_cnt = AVAIL;
1490583a24a4SDariusz Stojaczyk 
1491583a24a4SDariusz Stojaczyk 	second_ch = spdk_bdev_get_io_channel(second_desc);
1492583a24a4SDariusz Stojaczyk 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1493cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
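	/*
	 * Channels of different bdevs that sit on the same io_target (on the
	 * same thread) share one spdk_bdev_shared_resource, and hence the same
	 * nomem_io queue exercised below.
	 */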
1494583a24a4SDariusz Stojaczyk 
1495583a24a4SDariusz Stojaczyk 	/* Saturate io_target through bdev A. */
1496583a24a4SDariusz Stojaczyk 	for (i = 0; i < AVAIL; i++) {
1497583a24a4SDariusz Stojaczyk 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1498583a24a4SDariusz Stojaczyk 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1499583a24a4SDariusz Stojaczyk 		CU_ASSERT(rc == 0);
1500583a24a4SDariusz Stojaczyk 	}
1501cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
1502583a24a4SDariusz Stojaczyk 
1503583a24a4SDariusz Stojaczyk 	/*
1504583a24a4SDariusz Stojaczyk 	 * Now submit I/O through the second bdev. This should fail with ENOMEM
1505583a24a4SDariusz Stojaczyk 	 * and then go onto the nomem_io list.
1506583a24a4SDariusz Stojaczyk 	 */
1507583a24a4SDariusz Stojaczyk 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1508583a24a4SDariusz Stojaczyk 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1509583a24a4SDariusz Stojaczyk 	CU_ASSERT(rc == 0);
1510cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
1511583a24a4SDariusz Stojaczyk 
1512583a24a4SDariusz Stojaczyk 	/* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
1513583a24a4SDariusz Stojaczyk 	stub_complete_io(g_bdev.io_target, AVAIL);
1514583a24a4SDariusz Stojaczyk 
1515cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
1516cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(shared_resource->io_outstanding == 1);
1517583a24a4SDariusz Stojaczyk 
1518583a24a4SDariusz Stojaczyk 	/* Now complete our retried I/O  */
1519583a24a4SDariusz Stojaczyk 	stub_complete_io(g_bdev.io_target, 1);
1520cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
1521583a24a4SDariusz Stojaczyk 
1522583a24a4SDariusz Stojaczyk 	spdk_put_io_channel(io_ch);
1523583a24a4SDariusz Stojaczyk 	spdk_put_io_channel(second_ch);
1524583a24a4SDariusz Stojaczyk 	spdk_bdev_close(second_desc);
1525583a24a4SDariusz Stojaczyk 	unregister_bdev(second_bdev);
1526583a24a4SDariusz Stojaczyk 	poll_threads();
1527ce6a7cd8SSeth Howell 	free(second_bdev);
1528583a24a4SDariusz Stojaczyk 	teardown_test();
1529583a24a4SDariusz Stojaczyk }
1530583a24a4SDariusz Stojaczyk 
15317bcd316dSGangCao static void
15327bcd316dSGangCao enomem_multi_bdev_unregister(void)
15337bcd316dSGangCao {
15347bcd316dSGangCao 	struct spdk_io_channel *io_ch;
15357bcd316dSGangCao 	struct spdk_bdev_channel *bdev_ch;
15367bcd316dSGangCao 	struct spdk_bdev_shared_resource *shared_resource;
15377bcd316dSGangCao 	struct ut_bdev_channel *ut_ch;
15387bcd316dSGangCao 	const uint32_t IO_ARRAY_SIZE = 64;
15397bcd316dSGangCao 	const uint32_t AVAIL = 20;
15407bcd316dSGangCao 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
15417bcd316dSGangCao 	uint32_t i;
15427bcd316dSGangCao 	int rc;
15437bcd316dSGangCao 
15447bcd316dSGangCao 	setup_test();
15457bcd316dSGangCao 
15467bcd316dSGangCao 	set_thread(0);
15477bcd316dSGangCao 	io_ch = spdk_bdev_get_io_channel(g_desc);
15487bcd316dSGangCao 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
15497bcd316dSGangCao 	shared_resource = bdev_ch->shared_resource;
15507bcd316dSGangCao 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
15517bcd316dSGangCao 	ut_ch->avail_cnt = AVAIL;
15527bcd316dSGangCao 
15537bcd316dSGangCao 	/* Saturate io_target through the bdev. */
15547bcd316dSGangCao 	for (i = 0; i < AVAIL; i++) {
15557bcd316dSGangCao 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
15567bcd316dSGangCao 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
15577bcd316dSGangCao 		CU_ASSERT(rc == 0);
15587bcd316dSGangCao 	}
15597bcd316dSGangCao 	CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
15607bcd316dSGangCao 
15617bcd316dSGangCao 	/*
15627bcd316dSGangCao 	 * Now submit I/O through the bdev. This should fail with ENOMEM
15637bcd316dSGangCao 	 * and then go onto the nomem_io list.
15647bcd316dSGangCao 	 */
15657bcd316dSGangCao 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
15667bcd316dSGangCao 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
15677bcd316dSGangCao 	CU_ASSERT(rc == 0);
15687bcd316dSGangCao 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
15697bcd316dSGangCao 
15707bcd316dSGangCao 	/* Unregister the bdev to abort the I/Os queued on the nomem_io list. */
15717bcd316dSGangCao 	unregister_bdev(&g_bdev);
15727bcd316dSGangCao 	CU_ASSERT(status[AVAIL] == SPDK_BDEV_IO_STATUS_FAILED);
15737bcd316dSGangCao 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
15747bcd316dSGangCao 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == AVAIL);
15757bcd316dSGangCao 
15767bcd316dSGangCao 	/* Complete the bdev's I/O. */
15777bcd316dSGangCao 	stub_complete_io(g_bdev.io_target, AVAIL);
15787bcd316dSGangCao 	SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
15797bcd316dSGangCao 
15807bcd316dSGangCao 	spdk_put_io_channel(io_ch);
15817bcd316dSGangCao 	poll_threads();
15827bcd316dSGangCao 	teardown_test();
15837bcd316dSGangCao }
1584cc8bb51bSDariusz Stojaczyk 
1585e18d2b76SBen Walker static void
1586bfb73837SDariusz Stojaczyk enomem_multi_io_target(void)
1587bfb73837SDariusz Stojaczyk {
1588bfb73837SDariusz Stojaczyk 	struct spdk_io_channel *io_ch;
1589bfb73837SDariusz Stojaczyk 	struct spdk_bdev_channel *bdev_ch;
1590bfb73837SDariusz Stojaczyk 	struct ut_bdev_channel *ut_ch;
1591bfb73837SDariusz Stojaczyk 	const uint32_t IO_ARRAY_SIZE = 64;
1592bfb73837SDariusz Stojaczyk 	const uint32_t AVAIL = 20;
1593bfb73837SDariusz Stojaczyk 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
1594bfb73837SDariusz Stojaczyk 	uint32_t i;
1595bfb73837SDariusz Stojaczyk 	int new_io_device;
1596bfb73837SDariusz Stojaczyk 	struct ut_bdev *second_bdev;
159711ccf3beSSeth Howell 	struct spdk_bdev_desc *second_desc = NULL;
1598bfb73837SDariusz Stojaczyk 	struct spdk_bdev_channel *second_bdev_ch;
1599bfb73837SDariusz Stojaczyk 	struct spdk_io_channel *second_ch;
1600bfb73837SDariusz Stojaczyk 	int rc;
1601bfb73837SDariusz Stojaczyk 
1602bfb73837SDariusz Stojaczyk 	setup_test();
1603bfb73837SDariusz Stojaczyk 
1604bfb73837SDariusz Stojaczyk 	/* Create a new io_target and a second bdev using it */
1605bfb73837SDariusz Stojaczyk 	spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
1606c9402000SBen Walker 				sizeof(struct ut_bdev_channel), NULL);
1607bfb73837SDariusz Stojaczyk 	second_bdev = calloc(1, sizeof(*second_bdev));
1608bfb73837SDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
1609bfb73837SDariusz Stojaczyk 	register_bdev(second_bdev, "ut_bdev2", &new_io_device);
16106a684a34SShuhei Matsumoto 	spdk_bdev_open_ext("ut_bdev2", true, _bdev_event_cb, NULL, &second_desc);
161111ccf3beSSeth Howell 	SPDK_CU_ASSERT_FATAL(second_desc != NULL);
1612bfb73837SDariusz Stojaczyk 
1613bfb73837SDariusz Stojaczyk 	set_thread(0);
1614bfb73837SDariusz Stojaczyk 	io_ch = spdk_bdev_get_io_channel(g_desc);
1615bfb73837SDariusz Stojaczyk 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1616bfb73837SDariusz Stojaczyk 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1617bfb73837SDariusz Stojaczyk 	ut_ch->avail_cnt = AVAIL;
1618bfb73837SDariusz Stojaczyk 
1619cc8bb51bSDariusz Stojaczyk 	/* Different io_target should imply a different shared_resource */
1620bfb73837SDariusz Stojaczyk 	second_ch = spdk_bdev_get_io_channel(second_desc);
1621bfb73837SDariusz Stojaczyk 	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
1622cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
1623bfb73837SDariusz Stojaczyk 
1624bfb73837SDariusz Stojaczyk 	/* Saturate io_target through bdev A. */
1625bfb73837SDariusz Stojaczyk 	for (i = 0; i < AVAIL; i++) {
1626bfb73837SDariusz Stojaczyk 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1627bfb73837SDariusz Stojaczyk 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1628bfb73837SDariusz Stojaczyk 		CU_ASSERT(rc == 0);
1629bfb73837SDariusz Stojaczyk 	}
1630cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1631bfb73837SDariusz Stojaczyk 
1632bfb73837SDariusz Stojaczyk 	/* Issue one more I/O, which should be queued on the nomem_io list. */
1633bfb73837SDariusz Stojaczyk 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1634bfb73837SDariusz Stojaczyk 	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1635bfb73837SDariusz Stojaczyk 	CU_ASSERT(rc == 0);
1636cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1637bfb73837SDariusz Stojaczyk 
1638bfb73837SDariusz Stojaczyk 	/*
1639bfb73837SDariusz Stojaczyk 	 * Now submit I/O through the second bdev. This should go through and complete
1640bfb73837SDariusz Stojaczyk 	 * successfully because we're using a different io_device underneath.
1641bfb73837SDariusz Stojaczyk 	 */
1642bfb73837SDariusz Stojaczyk 	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
1643bfb73837SDariusz Stojaczyk 	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
1644bfb73837SDariusz Stojaczyk 	CU_ASSERT(rc == 0);
1645cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
1646bfb73837SDariusz Stojaczyk 	stub_complete_io(second_bdev->io_target, 1);
1647bfb73837SDariusz Stojaczyk 
1648bfb73837SDariusz Stojaczyk 	/* Cleanup; Complete outstanding I/O. */
1649bfb73837SDariusz Stojaczyk 	stub_complete_io(g_bdev.io_target, AVAIL);
1650cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1651bfb73837SDariusz Stojaczyk 	/* Complete the ENOMEM I/O */
1652bfb73837SDariusz Stojaczyk 	stub_complete_io(g_bdev.io_target, 1);
1653cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1654bfb73837SDariusz Stojaczyk 
1655cc8bb51bSDariusz Stojaczyk 	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
1656cc8bb51bSDariusz Stojaczyk 	CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
1657bfb73837SDariusz Stojaczyk 	spdk_put_io_channel(io_ch);
1658bfb73837SDariusz Stojaczyk 	spdk_put_io_channel(second_ch);
1659bfb73837SDariusz Stojaczyk 	spdk_bdev_close(second_desc);
1660bfb73837SDariusz Stojaczyk 	unregister_bdev(second_bdev);
1661bfb73837SDariusz Stojaczyk 	spdk_io_device_unregister(&new_io_device, NULL);
1662bfb73837SDariusz Stojaczyk 	poll_threads();
1663bfb73837SDariusz Stojaczyk 	free(second_bdev);
1664bfb73837SDariusz Stojaczyk 	teardown_test();
1665bfb73837SDariusz Stojaczyk }
1666bfb73837SDariusz Stojaczyk 
1667bfb73837SDariusz Stojaczyk static void
1668*52a41348SJinlong Chen enomem_retry_during_abort(void)
1669*52a41348SJinlong Chen {
1670*52a41348SJinlong Chen 	struct spdk_io_channel *io_ch;
1671*52a41348SJinlong Chen 	struct spdk_bdev_channel *bdev_ch;
1672*52a41348SJinlong Chen 	struct spdk_bdev_shared_resource *shared_resource;
1673*52a41348SJinlong Chen 	struct ut_bdev_channel *ut_ch;
1674*52a41348SJinlong Chen 	const uint32_t IO_ARRAY_SIZE = 16;
1675*52a41348SJinlong Chen 	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
1676*52a41348SJinlong Chen 	uint32_t i;
1677*52a41348SJinlong Chen 	int rc;
1678*52a41348SJinlong Chen 
1679*52a41348SJinlong Chen 	setup_test();
1680*52a41348SJinlong Chen 
1681*52a41348SJinlong Chen 	set_thread(0);
1682*52a41348SJinlong Chen 	io_ch = spdk_bdev_get_io_channel(g_desc);
1683*52a41348SJinlong Chen 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
1684*52a41348SJinlong Chen 	shared_resource = bdev_ch->shared_resource;
1685*52a41348SJinlong Chen 	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
1686*52a41348SJinlong Chen 	ut_ch->avail_cnt = 0;
1687*52a41348SJinlong Chen 
1688*52a41348SJinlong Chen 	/*
1689*52a41348SJinlong Chen 	 * Submit a number of I/Os.
1690*52a41348SJinlong Chen 	 * All of them will be queued on the nomem_io list because ut_ch->avail_cnt == 0.
1691*52a41348SJinlong Chen 	 */
1692*52a41348SJinlong Chen 	for (i = 0; i < IO_ARRAY_SIZE; i++) {
1693*52a41348SJinlong Chen 		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
1694*52a41348SJinlong Chen 		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
1695*52a41348SJinlong Chen 		CU_ASSERT(rc == 0);
1696*52a41348SJinlong Chen 	}
1697*52a41348SJinlong Chen 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == IO_ARRAY_SIZE);
1698*52a41348SJinlong Chen 	CU_ASSERT(shared_resource->io_outstanding == 0);
1699*52a41348SJinlong Chen 
1700*52a41348SJinlong Chen 	/* Allow some I/Os to be submitted. */
1701*52a41348SJinlong Chen 	ut_ch->avail_cnt = IO_ARRAY_SIZE / 2;
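	/*
	 * With avail_cnt raised, a retry of the nomem_io queue triggered while
	 * the reset is aborting it could now be accepted by the stub.  The
	 * queued I/Os must nevertheless all complete as failed.
	 */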
1702*52a41348SJinlong Chen 
1703*52a41348SJinlong Chen 	/* Submit a reset to abort the I/Os. */
1704*52a41348SJinlong Chen 	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
1705*52a41348SJinlong Chen 	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
1706*52a41348SJinlong Chen 	poll_threads();
1707*52a41348SJinlong Chen 	CU_ASSERT(rc == 0);
1708*52a41348SJinlong Chen 
1709*52a41348SJinlong Chen 	/* Complete the reset. */
1710*52a41348SJinlong Chen 	stub_complete_io(g_bdev.io_target, 1);
1711*52a41348SJinlong Chen 	poll_threads();
1712*52a41348SJinlong Chen 	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
1713*52a41348SJinlong Chen 
1714*52a41348SJinlong Chen 	/* All I/Os are aborted. */
1715*52a41348SJinlong Chen 	for (i = 0; i < IO_ARRAY_SIZE; i++) {
1716*52a41348SJinlong Chen 		CU_ASSERT(status[i] == SPDK_BDEV_IO_STATUS_FAILED);
1717*52a41348SJinlong Chen 	}
1718*52a41348SJinlong Chen 
1719*52a41348SJinlong Chen 	CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
1720*52a41348SJinlong Chen 	CU_ASSERT(shared_resource->io_outstanding == 0);
1721*52a41348SJinlong Chen 
1722*52a41348SJinlong Chen 	spdk_put_io_channel(io_ch);
1723*52a41348SJinlong Chen 	poll_threads();
1724*52a41348SJinlong Chen 	teardown_test();
1725*52a41348SJinlong Chen }
1726*52a41348SJinlong Chen 
1727*52a41348SJinlong Chen static void
1728e18d2b76SBen Walker qos_dynamic_enable_done(void *cb_arg, int status)
1729e18d2b76SBen Walker {
1730e18d2b76SBen Walker 	int *rc = cb_arg;
1731e18d2b76SBen Walker 	*rc = status;
1732e18d2b76SBen Walker }
1733e18d2b76SBen Walker 
1734e18d2b76SBen Walker static void
1735e18d2b76SBen Walker qos_dynamic_enable(void)
1736e18d2b76SBen Walker {
1737e18d2b76SBen Walker 	struct spdk_io_channel *io_ch[2];
1738e18d2b76SBen Walker 	struct spdk_bdev_channel *bdev_ch[2];
1739e18d2b76SBen Walker 	struct spdk_bdev *bdev;
1740140eaaa0SJim Harris 	enum spdk_bdev_io_status bdev_io_status[2];
17417d030ef7SGangCao 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
1742140eaaa0SJim Harris 	int status, second_status, rc, i;
1743e18d2b76SBen Walker 
1744e18d2b76SBen Walker 	setup_test();
17453c981508SBen Walker 	MOCK_SET(spdk_get_ticks, 0);
1746e18d2b76SBen Walker 
17477d030ef7SGangCao 	for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
17487d030ef7SGangCao 		limits[i] = UINT64_MAX;
17497d030ef7SGangCao 	}
17507d030ef7SGangCao 
1751e18d2b76SBen Walker 	bdev = &g_bdev.bdev;
1752e18d2b76SBen Walker 
1753e18d2b76SBen Walker 	g_get_io_channel = true;
1754e18d2b76SBen Walker 
1755e18d2b76SBen Walker 	/* Create channels */
1756e18d2b76SBen Walker 	set_thread(0);
1757e18d2b76SBen Walker 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
1758e18d2b76SBen Walker 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
1759e18d2b76SBen Walker 	CU_ASSERT(bdev_ch[0]->flags == 0);
1760e18d2b76SBen Walker 
1761e18d2b76SBen Walker 	set_thread(1);
1762e18d2b76SBen Walker 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
1763e18d2b76SBen Walker 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
1764e18d2b76SBen Walker 	CU_ASSERT(bdev_ch[1]->flags == 0);
1765e18d2b76SBen Walker 
1766e18d2b76SBen Walker 	set_thread(0);
1767e18d2b76SBen Walker 
17687d030ef7SGangCao 	/*
1769316cb3b1SGangCao 	 * Enable QoS: Read/Write IOPS, Read/Write byte,
1770316cb3b1SGangCao 	 * Read only byte and Write only byte per second
1771316cb3b1SGangCao 	 * rate limits.
17727d030ef7SGangCao 	 * More than 10 I/Os allowed per timeslice.
17737d030ef7SGangCao 	 */
1774e18d2b76SBen Walker 	status = -1;
17757d030ef7SGangCao 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
17767d030ef7SGangCao 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
1777316cb3b1SGangCao 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 100;
1778316cb3b1SGangCao 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 10;
17797d030ef7SGangCao 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1780e18d2b76SBen Walker 	poll_threads();
1781e18d2b76SBen Walker 	CU_ASSERT(status == 0);
1782e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1783e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1784e18d2b76SBen Walker 
1785140eaaa0SJim Harris 	/*
1786140eaaa0SJim Harris 	 * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
1787140eaaa0SJim Harris 	 * Additional I/O will then be queued.
1788140eaaa0SJim Harris 	 */
1789140eaaa0SJim Harris 	set_thread(0);
1790140eaaa0SJim Harris 	for (i = 0; i < 10; i++) {
1791140eaaa0SJim Harris 		bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1792140eaaa0SJim Harris 		rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1793140eaaa0SJim Harris 		CU_ASSERT(rc == 0);
1794140eaaa0SJim Harris 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1795140eaaa0SJim Harris 		poll_thread(0);
1796140eaaa0SJim Harris 		stub_complete_io(g_bdev.io_target, 0);
1797140eaaa0SJim Harris 		CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1798140eaaa0SJim Harris 	}
1799140eaaa0SJim Harris 
1800140eaaa0SJim Harris 	/*
1801140eaaa0SJim Harris 	 * Send two more I/O.  These I/O will be queued since the current timeslice allotment has been
1802140eaaa0SJim Harris 	 * filled already.  We want to test that when QoS is disabled that these two I/O:
1803140eaaa0SJim Harris 	 *  1) are not aborted
1804140eaaa0SJim Harris 	 *  2) are sent back to their original thread for resubmission
1805140eaaa0SJim Harris 	 */
1806140eaaa0SJim Harris 	bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
1807140eaaa0SJim Harris 	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
1808140eaaa0SJim Harris 	CU_ASSERT(rc == 0);
1809140eaaa0SJim Harris 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
1810140eaaa0SJim Harris 	set_thread(1);
1811140eaaa0SJim Harris 	bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
1812140eaaa0SJim Harris 	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
1813140eaaa0SJim Harris 	CU_ASSERT(rc == 0);
1814140eaaa0SJim Harris 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1815140eaaa0SJim Harris 	poll_threads();
1816140eaaa0SJim Harris 
1817316cb3b1SGangCao 	/*
1818316cb3b1SGangCao 	 * Disable QoS: Read/Write IOPS, Read/Write byte,
1819316cb3b1SGangCao 	 * Read only byte rate limits
1820316cb3b1SGangCao 	 */
1821e18d2b76SBen Walker 	status = -1;
18227d030ef7SGangCao 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
1823316cb3b1SGangCao 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
1824316cb3b1SGangCao 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
18257d030ef7SGangCao 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
18267d030ef7SGangCao 	poll_threads();
18277d030ef7SGangCao 	CU_ASSERT(status == 0);
18287d030ef7SGangCao 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
18297d030ef7SGangCao 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
18307d030ef7SGangCao 
1831316cb3b1SGangCao 	/* Disable QoS: Write only byte per second rate limit */
18327d030ef7SGangCao 	status = -1;
1833316cb3b1SGangCao 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
18347d030ef7SGangCao 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1835e18d2b76SBen Walker 	poll_threads();
1836e18d2b76SBen Walker 	CU_ASSERT(status == 0);
1837e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1838e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1839e18d2b76SBen Walker 
1840140eaaa0SJim Harris 	/*
1841140eaaa0SJim Harris 	 * All I/O should have been resubmitted back on their original thread.  Complete
1842140eaaa0SJim Harris 	 *  all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
1843140eaaa0SJim Harris 	 */
1844140eaaa0SJim Harris 	set_thread(0);
1845140eaaa0SJim Harris 	stub_complete_io(g_bdev.io_target, 0);
1846140eaaa0SJim Harris 	poll_threads();
1847140eaaa0SJim Harris 	CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
1848140eaaa0SJim Harris 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
1849140eaaa0SJim Harris 
1850140eaaa0SJim Harris 	/* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
1851140eaaa0SJim Harris 	set_thread(1);
1852140eaaa0SJim Harris 	stub_complete_io(g_bdev.io_target, 0);
1853140eaaa0SJim Harris 	poll_threads();
1854140eaaa0SJim Harris 	CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
1855140eaaa0SJim Harris 
1856e18d2b76SBen Walker 	/* Disable QoS again */
1857e18d2b76SBen Walker 	status = -1;
18587d030ef7SGangCao 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
18597d030ef7SGangCao 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1860e18d2b76SBen Walker 	poll_threads();
1861e18d2b76SBen Walker 	CU_ASSERT(status == 0); /* This should succeed */
1862e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1863e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1864e18d2b76SBen Walker 
1865e18d2b76SBen Walker 	/* Enable QoS on thread 0 */
1866e18d2b76SBen Walker 	status = -1;
18677d030ef7SGangCao 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
18687d030ef7SGangCao 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1869e18d2b76SBen Walker 	poll_threads();
1870e18d2b76SBen Walker 	CU_ASSERT(status == 0);
1871e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1872e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1873e18d2b76SBen Walker 
1874e18d2b76SBen Walker 	/* Disable QoS on thread 1 */
1875e18d2b76SBen Walker 	set_thread(1);
1876e18d2b76SBen Walker 	status = -1;
18777d030ef7SGangCao 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
18787d030ef7SGangCao 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1879e18d2b76SBen Walker 	/* Don't poll yet. This should leave the channels with QoS enabled */
1880e18d2b76SBen Walker 	CU_ASSERT(status == -1);
1881e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1882e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1883e18d2b76SBen Walker 
1884e18d2b76SBen Walker 	/* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
1885e18d2b76SBen Walker 	second_status = 0;
18867d030ef7SGangCao 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
18877d030ef7SGangCao 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
1888e18d2b76SBen Walker 	poll_threads();
1889e18d2b76SBen Walker 	CU_ASSERT(status == 0); /* The disable should succeed */
1890e18d2b76SBen Walker 	CU_ASSERT(second_status < 0); /* The enable should fail */
1891e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
1892e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
1893e18d2b76SBen Walker 
1894e18d2b76SBen Walker 	/* Enable QoS on thread 1. This should succeed now that the disable has completed. */
1895e18d2b76SBen Walker 	status = -1;
18967d030ef7SGangCao 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
18977d030ef7SGangCao 	spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
1898e18d2b76SBen Walker 	poll_threads();
1899e18d2b76SBen Walker 	CU_ASSERT(status == 0);
1900e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
1901e18d2b76SBen Walker 	CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
1902e18d2b76SBen Walker 
1903e18d2b76SBen Walker 	/* Tear down the channels */
1904e18d2b76SBen Walker 	set_thread(0);
1905e18d2b76SBen Walker 	spdk_put_io_channel(io_ch[0]);
1906e18d2b76SBen Walker 	set_thread(1);
1907e18d2b76SBen Walker 	spdk_put_io_channel(io_ch[1]);
1908e18d2b76SBen Walker 	poll_threads();
1909e18d2b76SBen Walker 
1910e18d2b76SBen Walker 	set_thread(0);
1911e18d2b76SBen Walker 	teardown_test();
1912e18d2b76SBen Walker }
1913e18d2b76SBen Walker 
191442dba604SPiotr Pelplinski static void
191542dba604SPiotr Pelplinski histogram_status_cb(void *cb_arg, int status)
191642dba604SPiotr Pelplinski {
191742dba604SPiotr Pelplinski 	g_status = status;
191842dba604SPiotr Pelplinski }
191942dba604SPiotr Pelplinski 
192042dba604SPiotr Pelplinski static void
192142dba604SPiotr Pelplinski histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data *histogram)
192242dba604SPiotr Pelplinski {
192342dba604SPiotr Pelplinski 	g_status = status;
192442dba604SPiotr Pelplinski 	g_histogram = histogram;
192542dba604SPiotr Pelplinski }
192642dba604SPiotr Pelplinski 
192742dba604SPiotr Pelplinski static void
192842dba604SPiotr Pelplinski histogram_io_count(void *ctx, uint64_t start, uint64_t end, uint64_t count,
192942dba604SPiotr Pelplinski 		   uint64_t total, uint64_t so_far)
193042dba604SPiotr Pelplinski {
193142dba604SPiotr Pelplinski 	g_count += count;
193242dba604SPiotr Pelplinski }
193342dba604SPiotr Pelplinski 
193442dba604SPiotr Pelplinski static void
193542dba604SPiotr Pelplinski bdev_histograms_mt(void)
193642dba604SPiotr Pelplinski {
193742dba604SPiotr Pelplinski 	struct spdk_io_channel *ch[2];
193842dba604SPiotr Pelplinski 	struct spdk_histogram_data *histogram;
193942dba604SPiotr Pelplinski 	uint8_t buf[4096];
194042dba604SPiotr Pelplinski 	enum spdk_bdev_io_status status = SPDK_BDEV_IO_STATUS_PENDING;
194142dba604SPiotr Pelplinski 	int rc;
194242dba604SPiotr Pelplinski 
194342dba604SPiotr Pelplinski 
194442dba604SPiotr Pelplinski 	setup_test();
194542dba604SPiotr Pelplinski 
194642dba604SPiotr Pelplinski 	set_thread(0);
194742dba604SPiotr Pelplinski 	ch[0] = spdk_bdev_get_io_channel(g_desc);
194842dba604SPiotr Pelplinski 	CU_ASSERT(ch[0] != NULL);
194942dba604SPiotr Pelplinski 
195042dba604SPiotr Pelplinski 	set_thread(1);
195142dba604SPiotr Pelplinski 	ch[1] = spdk_bdev_get_io_channel(g_desc);
195242dba604SPiotr Pelplinski 	CU_ASSERT(ch[1] != NULL);
195342dba604SPiotr Pelplinski 
195442dba604SPiotr Pelplinski 
195542dba604SPiotr Pelplinski 	/* Enable histogram */
195642dba604SPiotr Pelplinski 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, true);
195742dba604SPiotr Pelplinski 	poll_threads();
195842dba604SPiotr Pelplinski 	CU_ASSERT(g_status == 0);
195942dba604SPiotr Pelplinski 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
196042dba604SPiotr Pelplinski 
196142dba604SPiotr Pelplinski 	/* Allocate histogram */
196242dba604SPiotr Pelplinski 	histogram = spdk_histogram_data_alloc();
196342dba604SPiotr Pelplinski 
196442dba604SPiotr Pelplinski 	/* Check if histogram is zeroed */
196542dba604SPiotr Pelplinski 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
196642dba604SPiotr Pelplinski 	poll_threads();
196742dba604SPiotr Pelplinski 	CU_ASSERT(g_status == 0);
196842dba604SPiotr Pelplinski 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
196942dba604SPiotr Pelplinski 
197042dba604SPiotr Pelplinski 	g_count = 0;
197142dba604SPiotr Pelplinski 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
197242dba604SPiotr Pelplinski 
197342dba604SPiotr Pelplinski 	CU_ASSERT(g_count == 0);
197442dba604SPiotr Pelplinski 
197542dba604SPiotr Pelplinski 	set_thread(0);
197642dba604SPiotr Pelplinski 	rc = spdk_bdev_write_blocks(g_desc, ch[0], &buf, 0, 1, io_during_io_done, &status);
197742dba604SPiotr Pelplinski 	CU_ASSERT(rc == 0);
197842dba604SPiotr Pelplinski 
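	/*
	 * Advance the fake clock before completing the I/O so it is recorded
	 * with a non-zero latency in the histogram.
	 */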
197942dba604SPiotr Pelplinski 	spdk_delay_us(10);
198042dba604SPiotr Pelplinski 	stub_complete_io(g_bdev.io_target, 1);
198142dba604SPiotr Pelplinski 	poll_threads();
198242dba604SPiotr Pelplinski 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
198342dba604SPiotr Pelplinski 
198442dba604SPiotr Pelplinski 
198542dba604SPiotr Pelplinski 	set_thread(1);
	status = SPDK_BDEV_IO_STATUS_PENDING;
198642dba604SPiotr Pelplinski 	rc = spdk_bdev_read_blocks(g_desc, ch[1], &buf, 0, 1, io_during_io_done, &status);
198742dba604SPiotr Pelplinski 	CU_ASSERT(rc == 0);
198842dba604SPiotr Pelplinski 
198942dba604SPiotr Pelplinski 	spdk_delay_us(10);
199042dba604SPiotr Pelplinski 	stub_complete_io(g_bdev.io_target, 1);
199142dba604SPiotr Pelplinski 	poll_threads();
199242dba604SPiotr Pelplinski 	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
199342dba604SPiotr Pelplinski 
199442dba604SPiotr Pelplinski 	set_thread(0);
199542dba604SPiotr Pelplinski 
199642dba604SPiotr Pelplinski 	/* Check if histogram gathered data from all I/O channels */
199742dba604SPiotr Pelplinski 	spdk_bdev_histogram_get(&g_bdev.bdev, histogram, histogram_data_cb, NULL);
199842dba604SPiotr Pelplinski 	poll_threads();
199942dba604SPiotr Pelplinski 	CU_ASSERT(g_status == 0);
200042dba604SPiotr Pelplinski 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == true);
200142dba604SPiotr Pelplinski 	SPDK_CU_ASSERT_FATAL(g_histogram != NULL);
200242dba604SPiotr Pelplinski 
200342dba604SPiotr Pelplinski 	g_count = 0;
200442dba604SPiotr Pelplinski 	spdk_histogram_data_iterate(g_histogram, histogram_io_count, NULL);
200542dba604SPiotr Pelplinski 	CU_ASSERT(g_count == 2);
200642dba604SPiotr Pelplinski 
200742dba604SPiotr Pelplinski 	/* Disable histogram */
200842dba604SPiotr Pelplinski 	spdk_bdev_histogram_enable(&g_bdev.bdev, histogram_status_cb, NULL, false);
200942dba604SPiotr Pelplinski 	poll_threads();
201042dba604SPiotr Pelplinski 	CU_ASSERT(g_status == 0);
201142dba604SPiotr Pelplinski 	CU_ASSERT(g_bdev.bdev.internal.histogram_enabled == false);
201242dba604SPiotr Pelplinski 
2013e3b90fe2Syidong0635 	spdk_histogram_data_free(histogram);
201490513d3cSyidong0635 
201590513d3cSyidong0635 	/* Tear down the channels */
201690513d3cSyidong0635 	set_thread(0);
201790513d3cSyidong0635 	spdk_put_io_channel(ch[0]);
201890513d3cSyidong0635 	set_thread(1);
201990513d3cSyidong0635 	spdk_put_io_channel(ch[1]);
202090513d3cSyidong0635 	poll_threads();
202190513d3cSyidong0635 	set_thread(0);
202290513d3cSyidong0635 	teardown_test();
202390513d3cSyidong0635 
202442dba604SPiotr Pelplinski }
202542dba604SPiotr Pelplinski 
2026cbc9d343SJin Yu struct timeout_io_cb_arg {
2027cbc9d343SJin Yu 	struct iovec iov;
2028cbc9d343SJin Yu 	uint8_t type;
2029cbc9d343SJin Yu };
2030cbc9d343SJin Yu 
2031cbc9d343SJin Yu static int
2032cbc9d343SJin Yu bdev_channel_count_submitted_io(struct spdk_bdev_channel *ch)
2033cbc9d343SJin Yu {
2034cbc9d343SJin Yu 	struct spdk_bdev_io *bdev_io;
2035cbc9d343SJin Yu 	int n = 0;
2036cbc9d343SJin Yu 
2037cbc9d343SJin Yu 	if (!ch) {
2038cbc9d343SJin Yu 		return -1;
2039cbc9d343SJin Yu 	}
2040cbc9d343SJin Yu 
2041cbc9d343SJin Yu 	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
2042cbc9d343SJin Yu 		n++;
2043cbc9d343SJin Yu 	}
2044cbc9d343SJin Yu 
2045cbc9d343SJin Yu 	return n;
2046cbc9d343SJin Yu }
2047cbc9d343SJin Yu 
2048cbc9d343SJin Yu static void
2049cbc9d343SJin Yu bdev_channel_io_timeout_cb(void *cb_arg, struct spdk_bdev_io *bdev_io)
2050cbc9d343SJin Yu {
2051cbc9d343SJin Yu 	struct timeout_io_cb_arg *ctx = cb_arg;
2052cbc9d343SJin Yu 
2053cbc9d343SJin Yu 	ctx->type = bdev_io->type;
2054cbc9d343SJin Yu 	ctx->iov.iov_base = bdev_io->iov.iov_base;
2055cbc9d343SJin Yu 	ctx->iov.iov_len = bdev_io->iov.iov_len;
2056cbc9d343SJin Yu }
2057cbc9d343SJin Yu 
2058b90b7ce4SJim Harris static bool g_io_done;
2059b90b7ce4SJim Harris 
2060cbc9d343SJin Yu static void
2061cbc9d343SJin Yu io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2062cbc9d343SJin Yu {
2063b90b7ce4SJim Harris 	g_io_done = true;
2064cbc9d343SJin Yu 	spdk_bdev_free_io(bdev_io);
2065cbc9d343SJin Yu }
2066cbc9d343SJin Yu 
2067cbc9d343SJin Yu static void
2068cbc9d343SJin Yu bdev_set_io_timeout_mt(void)
2069cbc9d343SJin Yu {
2070cbc9d343SJin Yu 	struct spdk_io_channel *ch[3];
2071cbc9d343SJin Yu 	struct spdk_bdev_channel *bdev_ch[3];
2072cbc9d343SJin Yu 	struct timeout_io_cb_arg cb_arg;
2073cbc9d343SJin Yu 
2074cbc9d343SJin Yu 	setup_test();
2075cbc9d343SJin Yu 
2076cbc9d343SJin Yu 	g_bdev.bdev.optimal_io_boundary = 16;
2077cbc9d343SJin Yu 	g_bdev.bdev.split_on_optimal_io_boundary = true;
2078cbc9d343SJin Yu 
2079cbc9d343SJin Yu 	set_thread(0);
2080cbc9d343SJin Yu 	ch[0] = spdk_bdev_get_io_channel(g_desc);
2081cbc9d343SJin Yu 	CU_ASSERT(ch[0] != NULL);
2082cbc9d343SJin Yu 
2083cbc9d343SJin Yu 	set_thread(1);
2084cbc9d343SJin Yu 	ch[1] = spdk_bdev_get_io_channel(g_desc);
2085cbc9d343SJin Yu 	CU_ASSERT(ch[1] != NULL);
2086cbc9d343SJin Yu 
2087cbc9d343SJin Yu 	set_thread(2);
2088cbc9d343SJin Yu 	ch[2] = spdk_bdev_get_io_channel(g_desc);
2089cbc9d343SJin Yu 	CU_ASSERT(ch[2] != NULL);
2090cbc9d343SJin Yu 
2091cbc9d343SJin Yu 	/* Multi-thread mode
2092cbc9d343SJin Yu 	 * 1) Check the poller was registered successfully
2093cbc9d343SJin Yu 	 * 2) Check the timeout I/O and ensure the I/O was submitted by the user
2094cbc9d343SJin Yu 	 * 3) Check that the io_submitted link in the bdev_ch works right.
2095cbc9d343SJin Yu 	 * 4) Close the desc and put the io channel while the timeout poller is polling
2096cbc9d343SJin Yu 	 */
2097cbc9d343SJin Yu 
2098cbc9d343SJin Yu 	/* In desc thread set the timeout */
2099cbc9d343SJin Yu 	set_thread(0);
2100cbc9d343SJin Yu 	CU_ASSERT(spdk_bdev_set_timeout(g_desc, 5, bdev_channel_io_timeout_cb, &cb_arg) == 0);
2101cbc9d343SJin Yu 	CU_ASSERT(g_desc->io_timeout_poller != NULL);
2102cbc9d343SJin Yu 	CU_ASSERT(g_desc->cb_fn == bdev_channel_io_timeout_cb);
2103cbc9d343SJin Yu 	CU_ASSERT(g_desc->cb_arg == &cb_arg);
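	/* The io_timeout_poller registered above periodically walks each channel's
	 * io_submitted list and invokes cb_fn for any I/O that has been outstanding
	 * longer than the 5 second timeout.
	 */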
2104cbc9d343SJin Yu 
2105cbc9d343SJin Yu 	/* Check the submitted IO list and the timeout handler. */
2106cbc9d343SJin Yu 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x2000, 0, 1, io_done, NULL) == 0);
2107cbc9d343SJin Yu 	bdev_ch[0] = spdk_io_channel_get_ctx(ch[0]);
2108cbc9d343SJin Yu 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 1);
2109cbc9d343SJin Yu 
2110cbc9d343SJin Yu 	set_thread(1);
2111cbc9d343SJin Yu 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2112cbc9d343SJin Yu 	bdev_ch[1] = spdk_io_channel_get_ctx(ch[1]);
2113cbc9d343SJin Yu 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 1);
2114cbc9d343SJin Yu 
2115cbc9d343SJin Yu 	/* Now test that a single-vector command is split correctly.
2116cbc9d343SJin Yu 	 * Offset 14, length 8, payload 0xF000
2117cbc9d343SJin Yu 	 *  Child - Offset 14, length 2, payload 0xF000
2118cbc9d343SJin Yu 	 *  Child - Offset 16, length 6, payload 0xF000 + 2 * 512
2119cbc9d343SJin Yu 	 *
2120cbc9d343SJin Yu 	 * Set up the expected values before calling spdk_bdev_read_blocks
2121cbc9d343SJin Yu 	 */
2122cbc9d343SJin Yu 	set_thread(2);
2123cbc9d343SJin Yu 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0xF000, 14, 8, io_done, NULL) == 0);
2124cbc9d343SJin Yu 	bdev_ch[2] = spdk_io_channel_get_ctx(ch[2]);
2125fc3e4061SShuhei Matsumoto 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 3);
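	/* The parent I/O and its two split children are all linked on io_submitted,
	 * which is why the submitted count is 3 rather than 1.
	 */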
2126cbc9d343SJin Yu 
2127cbc9d343SJin Yu 	set_thread(0);
2128cbc9d343SJin Yu 	memset(&cb_arg, 0, sizeof(cb_arg));
2129cbc9d343SJin Yu 	spdk_delay_us(3 * spdk_get_ticks_hz());
2130cbc9d343SJin Yu 	poll_threads();
2131cbc9d343SJin Yu 	CU_ASSERT(cb_arg.type == 0);
2132cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2133cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_len == 0);
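	/* Only 3 of the 5 timeout seconds have elapsed, so no I/O has timed out yet
	 * and cb_arg is still zeroed.
	 */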
2134cbc9d343SJin Yu 
2135cbc9d343SJin Yu 	/* Now the elapsed time reaches the timeout limit. */
2136cbc9d343SJin Yu 	spdk_delay_us(3 * spdk_get_ticks_hz());
2137cbc9d343SJin Yu 	poll_thread(0);
2138cbc9d343SJin Yu 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2139cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2140cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2141cbc9d343SJin Yu 	stub_complete_io(g_bdev.io_target, 1);
2142cbc9d343SJin Yu 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[0]) == 0);
2143cbc9d343SJin Yu 
2144cbc9d343SJin Yu 	memset(&cb_arg, 0, sizeof(cb_arg));
2145cbc9d343SJin Yu 	set_thread(1);
2146cbc9d343SJin Yu 	poll_thread(1);
2147cbc9d343SJin Yu 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2148cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x1000);
2149cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_len == 1 * g_bdev.bdev.blocklen);
2150cbc9d343SJin Yu 	stub_complete_io(g_bdev.io_target, 1);
2151cbc9d343SJin Yu 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[1]) == 0);
2152cbc9d343SJin Yu 
2153cbc9d343SJin Yu 	memset(&cb_arg, 0, sizeof(cb_arg));
2154cbc9d343SJin Yu 	set_thread(2);
2155cbc9d343SJin Yu 	poll_thread(2);
2156cbc9d343SJin Yu 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_READ);
2157cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0xF000);
2158cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_len == 8 * g_bdev.bdev.blocklen);
2159cbc9d343SJin Yu 	stub_complete_io(g_bdev.io_target, 1);
2160fc3e4061SShuhei Matsumoto 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 2);
2161cbc9d343SJin Yu 	stub_complete_io(g_bdev.io_target, 1);
2162cbc9d343SJin Yu 	CU_ASSERT(bdev_channel_count_submitted_io(bdev_ch[2]) == 0);
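	/* The timeout callback reported the parent I/O (8 blocks at 0xF000), not the
	 * split children.  Completing the second child also completes the parent,
	 * which is why the submitted count drops from 2 straight to 0.
	 */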
2163cbc9d343SJin Yu 
2164cbc9d343SJin Yu 	/* Run poll_timeout_done(), which completes the timeout poller. */
2165cbc9d343SJin Yu 	set_thread(0);
2166cbc9d343SJin Yu 	poll_thread(0);
2167cbc9d343SJin Yu 	CU_ASSERT(g_desc->refs == 0);
2168cbc9d343SJin Yu 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[0], (void *)0x1000, 0, 1, io_done, NULL) == 0);
2169cbc9d343SJin Yu 	set_thread(1);
2170cbc9d343SJin Yu 	CU_ASSERT(spdk_bdev_write_blocks(g_desc, ch[1], (void *)0x2000, 0, 2, io_done, NULL) == 0);
2171cbc9d343SJin Yu 	set_thread(2);
2172cbc9d343SJin Yu 	CU_ASSERT(spdk_bdev_read_blocks(g_desc, ch[2], (void *)0x3000, 0, 3, io_done, NULL) == 0);
2173cbc9d343SJin Yu 
2174cbc9d343SJin Yu 	/* Trigger the timeout poller to run again; desc->refs is incremented.
2175cbc9d343SJin Yu 	 * On thread 0 we destroy the io channel before the timeout poller runs,
2176cbc9d343SJin Yu 	 * so the timeout callback is not called on thread 0.
2177cbc9d343SJin Yu 	 */
2178cbc9d343SJin Yu 	spdk_delay_us(6 * spdk_get_ticks_hz());
2179cbc9d343SJin Yu 	memset(&cb_arg, 0, sizeof(cb_arg));
2180cbc9d343SJin Yu 	set_thread(0);
2181cbc9d343SJin Yu 	stub_complete_io(g_bdev.io_target, 1);
2182cbc9d343SJin Yu 	spdk_put_io_channel(ch[0]);
2183cbc9d343SJin Yu 	poll_thread(0);
2184cbc9d343SJin Yu 	CU_ASSERT(g_desc->refs == 1);
2185cbc9d343SJin Yu 	CU_ASSERT(cb_arg.type == 0);
2186cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2187cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2188cbc9d343SJin Yu 
2189cbc9d343SJin Yu 	/* On thread 1 the timeout poller runs, then we destroy the io channel.
2190cbc9d343SJin Yu 	 * The timeout callback is called on thread 1.
2191cbc9d343SJin Yu 	 */
2192cbc9d343SJin Yu 	memset(&cb_arg, 0, sizeof(cb_arg));
2193cbc9d343SJin Yu 	set_thread(1);
2194cbc9d343SJin Yu 	poll_thread(1);
2195cbc9d343SJin Yu 	stub_complete_io(g_bdev.io_target, 1);
2196cbc9d343SJin Yu 	spdk_put_io_channel(ch[1]);
2197cbc9d343SJin Yu 	poll_thread(1);
2198cbc9d343SJin Yu 	CU_ASSERT(cb_arg.type == SPDK_BDEV_IO_TYPE_WRITE);
2199cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x2000);
2200cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_len == 2 * g_bdev.bdev.blocklen);
2201cbc9d343SJin Yu 
2202cbc9d343SJin Yu 	/* Close the desc.
2203cbc9d343SJin Yu 	 * This unregisters the timeout poller first, then decrements desc->refs;
2204cbc9d343SJin Yu 	 * refs is not zero yet, so the desc is not freed.
2205cbc9d343SJin Yu 	 */
2206cbc9d343SJin Yu 	set_thread(0);
2207cbc9d343SJin Yu 	spdk_bdev_close(g_desc);
2208cbc9d343SJin Yu 	CU_ASSERT(g_desc->refs == 1);
2209cbc9d343SJin Yu 	CU_ASSERT(g_desc->io_timeout_poller == NULL);
2210cbc9d343SJin Yu 
2211cbc9d343SJin Yu 	/* The timeout poller runs on thread 2, then we destroy the io channel.
2212cbc9d343SJin Yu 	 * The desc is already closed, so the timeout poller exits immediately.
2213cbc9d343SJin Yu 	 * The timeout callback is not called on thread 2.
2214cbc9d343SJin Yu 	 */
2215cbc9d343SJin Yu 	memset(&cb_arg, 0, sizeof(cb_arg));
2216cbc9d343SJin Yu 	set_thread(2);
2217cbc9d343SJin Yu 	poll_thread(2);
2218cbc9d343SJin Yu 	stub_complete_io(g_bdev.io_target, 1);
2219cbc9d343SJin Yu 	spdk_put_io_channel(ch[2]);
2220cbc9d343SJin Yu 	poll_thread(2);
2221cbc9d343SJin Yu 	CU_ASSERT(cb_arg.type == 0);
2222cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_base == (void *)0x0);
2223cbc9d343SJin Yu 	CU_ASSERT(cb_arg.iov.iov_len == 0);
2224cbc9d343SJin Yu 
2225cbc9d343SJin Yu 	set_thread(0);
2226cbc9d343SJin Yu 	poll_thread(0);
2227cbc9d343SJin Yu 	g_teardown_done = false;
2228cbc9d343SJin Yu 	unregister_bdev(&g_bdev);
2229cbc9d343SJin Yu 	spdk_io_device_unregister(&g_io_device, NULL);
2230cbc9d343SJin Yu 	spdk_bdev_finish(finish_cb, NULL);
22315a3e64efSKonrad Sztyber 	spdk_iobuf_finish(finish_cb, NULL);
2232cbc9d343SJin Yu 	poll_threads();
2233cbc9d343SJin Yu 	memset(&g_bdev, 0, sizeof(g_bdev));
2234cbc9d343SJin Yu 	CU_ASSERT(g_teardown_done == true);
2235cbc9d343SJin Yu 	g_teardown_done = false;
2236cbc9d343SJin Yu 	free_threads();
2237972b3ae3SShuhei Matsumoto 	free_cores();
2238cbc9d343SJin Yu }
2239cbc9d343SJin Yu 
2240ebd1a4f7SJim Harris static bool g_io_done2;
2241b90b7ce4SJim Harris static bool g_lock_lba_range_done;
2242b90b7ce4SJim Harris static bool g_unlock_lba_range_done;
2243b90b7ce4SJim Harris 
2244b90b7ce4SJim Harris static void
2245ebd1a4f7SJim Harris io_done2(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
2246ebd1a4f7SJim Harris {
2247ebd1a4f7SJim Harris 	g_io_done2 = true;
2248ebd1a4f7SJim Harris 	spdk_bdev_free_io(bdev_io);
2249ebd1a4f7SJim Harris }
2250ebd1a4f7SJim Harris 
2251ebd1a4f7SJim Harris static void
2252687cfd4bSArtur Paszkiewicz lock_lba_range_done(struct lba_range *range, void *ctx, int status)
2253b90b7ce4SJim Harris {
2254b90b7ce4SJim Harris 	g_lock_lba_range_done = true;
2255b90b7ce4SJim Harris }
2256b90b7ce4SJim Harris 
2257b90b7ce4SJim Harris static void
2258687cfd4bSArtur Paszkiewicz unlock_lba_range_done(struct lba_range *range, void *ctx, int status)
2259b90b7ce4SJim Harris {
2260b90b7ce4SJim Harris 	g_unlock_lba_range_done = true;
2261b90b7ce4SJim Harris }
2262b90b7ce4SJim Harris 
2263b90b7ce4SJim Harris static uint32_t
2264b90b7ce4SJim Harris stub_channel_outstanding_cnt(void *io_target)
2265b90b7ce4SJim Harris {
2266b90b7ce4SJim Harris 	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
2267b90b7ce4SJim Harris 	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
2268b90b7ce4SJim Harris 	uint32_t outstanding_cnt;
2269b90b7ce4SJim Harris 
2270b90b7ce4SJim Harris 	outstanding_cnt = ch->outstanding_cnt;
2271b90b7ce4SJim Harris 
2272b90b7ce4SJim Harris 	spdk_put_io_channel(_ch);
2273b90b7ce4SJim Harris 	return outstanding_cnt;
2274b90b7ce4SJim Harris }
2275b90b7ce4SJim Harris 
2276b90b7ce4SJim Harris static void
2277b90b7ce4SJim Harris lock_lba_range_then_submit_io(void)
2278b90b7ce4SJim Harris {
2279b90b7ce4SJim Harris 	struct spdk_bdev_desc *desc = NULL;
2280b90b7ce4SJim Harris 	void *io_target;
2281b90b7ce4SJim Harris 	struct spdk_io_channel *io_ch[3];
2282b90b7ce4SJim Harris 	struct spdk_bdev_channel *bdev_ch[3];
2283b90b7ce4SJim Harris 	struct lba_range *range;
2284b90b7ce4SJim Harris 	char buf[4096];
2285ebd1a4f7SJim Harris 	int ctx0, ctx1, ctx2;
2286b90b7ce4SJim Harris 	int rc;
2287b90b7ce4SJim Harris 
2288b90b7ce4SJim Harris 	setup_test();
2289b90b7ce4SJim Harris 
2290b90b7ce4SJim Harris 	io_target = g_bdev.io_target;
2291b90b7ce4SJim Harris 	desc = g_desc;
2292b90b7ce4SJim Harris 
2293b90b7ce4SJim Harris 	set_thread(0);
2294b90b7ce4SJim Harris 	io_ch[0] = spdk_bdev_get_io_channel(desc);
2295b90b7ce4SJim Harris 	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
2296b90b7ce4SJim Harris 	CU_ASSERT(io_ch[0] != NULL);
2297b90b7ce4SJim Harris 
2298b90b7ce4SJim Harris 	set_thread(1);
2299b90b7ce4SJim Harris 	io_ch[1] = spdk_bdev_get_io_channel(desc);
2300b90b7ce4SJim Harris 	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
2301b90b7ce4SJim Harris 	CU_ASSERT(io_ch[1] != NULL);
2302b90b7ce4SJim Harris 
2303b90b7ce4SJim Harris 	set_thread(0);
2304b90b7ce4SJim Harris 	g_lock_lba_range_done = false;
2305b90b7ce4SJim Harris 	rc = bdev_lock_lba_range(desc, io_ch[0], 20, 10, lock_lba_range_done, &ctx0);
2306b90b7ce4SJim Harris 	CU_ASSERT(rc == 0);
2307b90b7ce4SJim Harris 	poll_threads();
2308b90b7ce4SJim Harris 
2309b90b7ce4SJim Harris 	/* The lock should immediately become valid, since there are no outstanding
2310b90b7ce4SJim Harris 	 * write I/O.
2311b90b7ce4SJim Harris 	 */
2312b90b7ce4SJim Harris 	CU_ASSERT(g_lock_lba_range_done == true);
2313b90b7ce4SJim Harris 	range = TAILQ_FIRST(&bdev_ch[0]->locked_ranges);
2314b90b7ce4SJim Harris 	SPDK_CU_ASSERT_FATAL(range != NULL);
2315b90b7ce4SJim Harris 	CU_ASSERT(range->offset == 20);
2316b90b7ce4SJim Harris 	CU_ASSERT(range->length == 10);
2317b90b7ce4SJim Harris 	CU_ASSERT(range->owner_ch == bdev_ch[0]);
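	/* owner_ch records the channel that took the lock.  As exercised below, it
	 * is the only channel whose writes may proceed while the range is locked
	 * and the only one allowed to unlock it.
	 */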
2318b90b7ce4SJim Harris 
2319b90b7ce4SJim Harris 	g_io_done = false;
2320b90b7ce4SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2321b90b7ce4SJim Harris 	rc = spdk_bdev_read_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2322b90b7ce4SJim Harris 	CU_ASSERT(rc == 0);
2323b90b7ce4SJim Harris 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2324b90b7ce4SJim Harris 
2325b90b7ce4SJim Harris 	stub_complete_io(io_target, 1);
2326b90b7ce4SJim Harris 	poll_threads();
2327b90b7ce4SJim Harris 	CU_ASSERT(g_io_done == true);
2328b90b7ce4SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2329b90b7ce4SJim Harris 
2330b90b7ce4SJim Harris 	/* Try a write I/O.  This should actually be allowed to execute, since the channel
2331b90b7ce4SJim Harris 	 * holding the lock is submitting the write I/O.
2332b90b7ce4SJim Harris 	 */
2333b90b7ce4SJim Harris 	g_io_done = false;
2334b90b7ce4SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2335b90b7ce4SJim Harris 	rc = spdk_bdev_write_blocks(desc, io_ch[0], buf, 20, 1, io_done, &ctx0);
2336b90b7ce4SJim Harris 	CU_ASSERT(rc == 0);
2337b90b7ce4SJim Harris 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2338b90b7ce4SJim Harris 
2339b90b7ce4SJim Harris 	stub_complete_io(io_target, 1);
2340b90b7ce4SJim Harris 	poll_threads();
2341b90b7ce4SJim Harris 	CU_ASSERT(g_io_done == true);
2342b90b7ce4SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->io_locked));
2343b90b7ce4SJim Harris 
2344b90b7ce4SJim Harris 	/* Try a write I/O.  This should get queued in the io_locked tailq. */
2345b90b7ce4SJim Harris 	set_thread(1);
2346b90b7ce4SJim Harris 	g_io_done = false;
2347b90b7ce4SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2348b90b7ce4SJim Harris 	rc = spdk_bdev_write_blocks(desc, io_ch[1], buf, 20, 1, io_done, &ctx1);
2349b90b7ce4SJim Harris 	CU_ASSERT(rc == 0);
2350b90b7ce4SJim Harris 	poll_threads();
2351b90b7ce4SJim Harris 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2352b90b7ce4SJim Harris 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2353b90b7ce4SJim Harris 	CU_ASSERT(g_io_done == false);
2354b90b7ce4SJim Harris 
2355b90b7ce4SJim Harris 	/* Try to unlock the lba range using thread 1's io_ch.  This should fail. */
2356b90b7ce4SJim Harris 	rc = bdev_unlock_lba_range(desc, io_ch[1], 20, 10, unlock_lba_range_done, &ctx1);
2357b90b7ce4SJim Harris 	CU_ASSERT(rc == -EINVAL);
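	/* Only the owner channel (bdev_ch[0] on thread 0) may unlock the range. */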
2358b90b7ce4SJim Harris 
2359ebd1a4f7SJim Harris 	/* Now create a new channel and submit a write I/O with it.  This should also be queued.
2360ebd1a4f7SJim Harris 	 * The new channel should inherit the active locks from the bdev's internal list.
2361ebd1a4f7SJim Harris 	 */
2362ebd1a4f7SJim Harris 	set_thread(2);
2363ebd1a4f7SJim Harris 	io_ch[2] = spdk_bdev_get_io_channel(desc);
2364ebd1a4f7SJim Harris 	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
2365ebd1a4f7SJim Harris 	CU_ASSERT(io_ch[2] != NULL);
2366ebd1a4f7SJim Harris 
2367ebd1a4f7SJim Harris 	g_io_done2 = false;
2368ebd1a4f7SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2369ebd1a4f7SJim Harris 	rc = spdk_bdev_write_blocks(desc, io_ch[2], buf, 22, 2, io_done2, &ctx2);
2370ebd1a4f7SJim Harris 	CU_ASSERT(rc == 0);
2371ebd1a4f7SJim Harris 	poll_threads();
2372ebd1a4f7SJim Harris 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 0);
2373ebd1a4f7SJim Harris 	CU_ASSERT(!TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2374ebd1a4f7SJim Harris 	CU_ASSERT(g_io_done2 == false);
2375ebd1a4f7SJim Harris 
2376b90b7ce4SJim Harris 	set_thread(0);
2377b90b7ce4SJim Harris 	rc = bdev_unlock_lba_range(desc, io_ch[0], 20, 10, unlock_lba_range_done, &ctx0);
2378b90b7ce4SJim Harris 	CU_ASSERT(rc == 0);
2379b90b7ce4SJim Harris 	poll_threads();
2380b90b7ce4SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[0]->locked_ranges));
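	/* Unlocking removed the range from the owner channel's locked_ranges list. */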
2381b90b7ce4SJim Harris 
2382b90b7ce4SJim Harris 	/* The LBA range is unlocked, so the write IOs should now have started execution. */
2383b90b7ce4SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[1]->io_locked));
2384ebd1a4f7SJim Harris 	CU_ASSERT(TAILQ_EMPTY(&bdev_ch[2]->io_locked));
2385b90b7ce4SJim Harris 
2386b90b7ce4SJim Harris 	set_thread(1);
2387b90b7ce4SJim Harris 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2388b90b7ce4SJim Harris 	stub_complete_io(io_target, 1);
2389ebd1a4f7SJim Harris 	set_thread(2);
2390ebd1a4f7SJim Harris 	CU_ASSERT(stub_channel_outstanding_cnt(io_target) == 1);
2391ebd1a4f7SJim Harris 	stub_complete_io(io_target, 1);
2392b90b7ce4SJim Harris 
2393b90b7ce4SJim Harris 	poll_threads();
2394b90b7ce4SJim Harris 	CU_ASSERT(g_io_done == true);
2395ebd1a4f7SJim Harris 	CU_ASSERT(g_io_done2 == true);
2396b90b7ce4SJim Harris 
2397b90b7ce4SJim Harris 	/* Tear down the channels */
2398b90b7ce4SJim Harris 	set_thread(0);
2399b90b7ce4SJim Harris 	spdk_put_io_channel(io_ch[0]);
2400b90b7ce4SJim Harris 	set_thread(1);
2401b90b7ce4SJim Harris 	spdk_put_io_channel(io_ch[1]);
2402ebd1a4f7SJim Harris 	set_thread(2);
2403ebd1a4f7SJim Harris 	spdk_put_io_channel(io_ch[2]);
2404b90b7ce4SJim Harris 	poll_threads();
2405b90b7ce4SJim Harris 	set_thread(0);
2406b90b7ce4SJim Harris 	teardown_test();
2407b90b7ce4SJim Harris }
2408b90b7ce4SJim Harris 
2409494eb6e5SShuhei Matsumoto /* spdk_bdev_reset() freezes and unfreezes I/O channels by using spdk_for_each_channel().
2410494eb6e5SShuhei Matsumoto  * spdk_bdev_unregister() calls spdk_io_device_unregister() at the end. However,
2411494eb6e5SShuhei Matsumoto  * spdk_io_device_unregister() fails if it is called while spdk_for_each_channel() is executing.
2412494eb6e5SShuhei Matsumoto  * Hence, in this case, spdk_io_device_unregister() is deferred until spdk_bdev_reset()
2413494eb6e5SShuhei Matsumoto  * completes. Test this behavior.
2414494eb6e5SShuhei Matsumoto  */
2415494eb6e5SShuhei Matsumoto static void
2416494eb6e5SShuhei Matsumoto unregister_during_reset(void)
2417494eb6e5SShuhei Matsumoto {
2418494eb6e5SShuhei Matsumoto 	struct spdk_io_channel *io_ch[2];
2419494eb6e5SShuhei Matsumoto 	bool done_reset = false, done_unregister = false;
2420494eb6e5SShuhei Matsumoto 	int rc;
2421494eb6e5SShuhei Matsumoto 
2422494eb6e5SShuhei Matsumoto 	setup_test();
2423494eb6e5SShuhei Matsumoto 	set_thread(0);
2424494eb6e5SShuhei Matsumoto 
2425494eb6e5SShuhei Matsumoto 	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
2426494eb6e5SShuhei Matsumoto 	SPDK_CU_ASSERT_FATAL(io_ch[0] != NULL);
2427494eb6e5SShuhei Matsumoto 
2428494eb6e5SShuhei Matsumoto 	set_thread(1);
2429494eb6e5SShuhei Matsumoto 
2430494eb6e5SShuhei Matsumoto 	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
2431494eb6e5SShuhei Matsumoto 	SPDK_CU_ASSERT_FATAL(io_ch[1] != NULL);
2432494eb6e5SShuhei Matsumoto 
2433494eb6e5SShuhei Matsumoto 	set_thread(0);
2434494eb6e5SShuhei Matsumoto 
2435494eb6e5SShuhei Matsumoto 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2436494eb6e5SShuhei Matsumoto 
2437494eb6e5SShuhei Matsumoto 	rc = spdk_bdev_reset(g_desc, io_ch[0], reset_done, &done_reset);
2438494eb6e5SShuhei Matsumoto 	CU_ASSERT(rc == 0);
2439494eb6e5SShuhei Matsumoto 
2440494eb6e5SShuhei Matsumoto 	set_thread(0);
2441494eb6e5SShuhei Matsumoto 
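	/* Poll only once so that the reset's spdk_for_each_channel() freeze is still
	 * in progress when we unregister below.
	 */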
2442494eb6e5SShuhei Matsumoto 	poll_thread_times(0, 1);
2443494eb6e5SShuhei Matsumoto 
2444494eb6e5SShuhei Matsumoto 	spdk_bdev_close(g_desc);
2445494eb6e5SShuhei Matsumoto 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister);
2446494eb6e5SShuhei Matsumoto 
2447494eb6e5SShuhei Matsumoto 	CU_ASSERT(done_reset == false);
2448494eb6e5SShuhei Matsumoto 	CU_ASSERT(done_unregister == false);
2449494eb6e5SShuhei Matsumoto 
2450494eb6e5SShuhei Matsumoto 	poll_threads();
2451494eb6e5SShuhei Matsumoto 
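	/* Passing 0 to stub_complete_io() completes everything outstanding on the
	 * target; here that is just the reset itself.
	 */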
2452494eb6e5SShuhei Matsumoto 	stub_complete_io(g_bdev.io_target, 0);
2453494eb6e5SShuhei Matsumoto 
2454494eb6e5SShuhei Matsumoto 	poll_threads();
2455494eb6e5SShuhei Matsumoto 
2456494eb6e5SShuhei Matsumoto 	CU_ASSERT(done_reset == true);
2457494eb6e5SShuhei Matsumoto 	CU_ASSERT(done_unregister == false);
2458494eb6e5SShuhei Matsumoto 
2459494eb6e5SShuhei Matsumoto 	spdk_put_io_channel(io_ch[0]);
2460494eb6e5SShuhei Matsumoto 
2461494eb6e5SShuhei Matsumoto 	set_thread(1);
2462494eb6e5SShuhei Matsumoto 
2463494eb6e5SShuhei Matsumoto 	spdk_put_io_channel(io_ch[1]);
2464494eb6e5SShuhei Matsumoto 
2465494eb6e5SShuhei Matsumoto 	poll_threads();
2466494eb6e5SShuhei Matsumoto 
2467494eb6e5SShuhei Matsumoto 	CU_ASSERT(done_unregister == true);
2468494eb6e5SShuhei Matsumoto 
2469494eb6e5SShuhei Matsumoto 	/* Restore the original g_bdev so that we can use teardown_test(). */
2470494eb6e5SShuhei Matsumoto 	set_thread(0);
2471494eb6e5SShuhei Matsumoto 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
2472494eb6e5SShuhei Matsumoto 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
2473494eb6e5SShuhei Matsumoto 	teardown_test();
2474494eb6e5SShuhei Matsumoto }
2475494eb6e5SShuhei Matsumoto 
2476a6e58cc4SMike Gerdts static void
2477a6e58cc4SMike Gerdts bdev_init_wt_cb(void *done, int rc)
2478a6e58cc4SMike Gerdts {
2479a6e58cc4SMike Gerdts }
2480a6e58cc4SMike Gerdts 
2481a6e58cc4SMike Gerdts static int
2482a6e58cc4SMike Gerdts wrong_thread_setup(void)
2483a6e58cc4SMike Gerdts {
2484a6e58cc4SMike Gerdts 	allocate_cores(1);
2485a6e58cc4SMike Gerdts 	allocate_threads(2);
2486a6e58cc4SMike Gerdts 	set_thread(0);
2487a6e58cc4SMike Gerdts 
248880b22cf3SKonrad Sztyber 	spdk_io_device_register(&g_accel_io_device, ut_accel_ch_create_cb,
248980b22cf3SKonrad Sztyber 				ut_accel_ch_destroy_cb, 0, NULL);
2490a6e58cc4SMike Gerdts 	spdk_bdev_initialize(bdev_init_wt_cb, NULL);
2491a6e58cc4SMike Gerdts 	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
2492a6e58cc4SMike Gerdts 				sizeof(struct ut_bdev_channel), NULL);
2493a6e58cc4SMike Gerdts 
2494a6e58cc4SMike Gerdts 	set_thread(1);
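	/* Leave thread 1 as the current thread.  The bdev layer and io devices were
	 * set up on thread 0, so every test in this suite starts on a non-app
	 * ("wrong") thread.
	 */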
2495a6e58cc4SMike Gerdts 
2496a6e58cc4SMike Gerdts 	return 0;
2497a6e58cc4SMike Gerdts }
2498a6e58cc4SMike Gerdts 
2499a6e58cc4SMike Gerdts static int
2500a6e58cc4SMike Gerdts wrong_thread_teardown(void)
2501a6e58cc4SMike Gerdts {
25023e7394afSMike Gerdts 	int rc = 0;
2503a6e58cc4SMike Gerdts 
2504a6e58cc4SMike Gerdts 	set_thread(0);
2505a6e58cc4SMike Gerdts 
2506a6e58cc4SMike Gerdts 	g_teardown_done = false;
2507a6e58cc4SMike Gerdts 	spdk_io_device_unregister(&g_io_device, NULL);
2508a6e58cc4SMike Gerdts 	spdk_bdev_finish(finish_cb, NULL);
2509a6e58cc4SMike Gerdts 	poll_threads();
2510a6e58cc4SMike Gerdts 	memset(&g_bdev, 0, sizeof(g_bdev));
2511a6e58cc4SMike Gerdts 	if (!g_teardown_done) {
2512a6e58cc4SMike Gerdts 		fprintf(stderr, "%s:%d %s: teardown not done\n", __FILE__, __LINE__, __func__);
2513a6e58cc4SMike Gerdts 		rc = -1;
2514a6e58cc4SMike Gerdts 	}
2515a6e58cc4SMike Gerdts 	g_teardown_done = false;
2516a6e58cc4SMike Gerdts 
251780b22cf3SKonrad Sztyber 	spdk_io_device_unregister(&g_accel_io_device, NULL);
2518a6e58cc4SMike Gerdts 	free_threads();
2519a6e58cc4SMike Gerdts 	free_cores();
2520a6e58cc4SMike Gerdts 
2521a6e58cc4SMike Gerdts 	return rc;
2522a6e58cc4SMike Gerdts }
2523a6e58cc4SMike Gerdts 
2524a6e58cc4SMike Gerdts static void
25253e7394afSMike Gerdts _bdev_unregistered_wt(void *ctx, int rc)
25263e7394afSMike Gerdts {
25273e7394afSMike Gerdts 	struct spdk_thread **threadp = ctx;
25283e7394afSMike Gerdts 
25293e7394afSMike Gerdts 	*threadp = spdk_get_thread();
25303e7394afSMike Gerdts }
25313e7394afSMike Gerdts 
25323e7394afSMike Gerdts static void
2533a6e58cc4SMike Gerdts spdk_bdev_register_wt(void)
2534a6e58cc4SMike Gerdts {
2535a6e58cc4SMike Gerdts 	struct spdk_bdev bdev = { 0 };
2536a6e58cc4SMike Gerdts 	int rc;
25373e7394afSMike Gerdts 	struct spdk_thread *unreg_thread;
2538a6e58cc4SMike Gerdts 
2539a6e58cc4SMike Gerdts 	bdev.name = "wt_bdev";
2540a6e58cc4SMike Gerdts 	bdev.fn_table = &fn_table;
2541a6e58cc4SMike Gerdts 	bdev.module = &bdev_ut_if;
2542a6e58cc4SMike Gerdts 	bdev.blocklen = 4096;
2543a6e58cc4SMike Gerdts 	bdev.blockcnt = 1024;
2544a6e58cc4SMike Gerdts 
2545a6e58cc4SMike Gerdts 	/* Can register only on app thread */
2546a6e58cc4SMike Gerdts 	rc = spdk_bdev_register(&bdev);
25473e7394afSMike Gerdts 	CU_ASSERT(rc == -EINVAL);
2548a6e58cc4SMike Gerdts 
2549a6e58cc4SMike Gerdts 	/* Can unregister on any thread */
25503e7394afSMike Gerdts 	set_thread(0);
2551a6e58cc4SMike Gerdts 	rc = spdk_bdev_register(&bdev);
2552a6e58cc4SMike Gerdts 	CU_ASSERT(rc == 0);
25533e7394afSMike Gerdts 	set_thread(1);
25543e7394afSMike Gerdts 	unreg_thread = NULL;
25553e7394afSMike Gerdts 	spdk_bdev_unregister(&bdev, _bdev_unregistered_wt, &unreg_thread);
25563e7394afSMike Gerdts 	poll_threads();
25573e7394afSMike Gerdts 	CU_ASSERT(unreg_thread == spdk_get_thread());
25583e7394afSMike Gerdts 
25593e7394afSMike Gerdts 	/* Can unregister by name on any thread */
25603e7394afSMike Gerdts 	set_thread(0);
25613e7394afSMike Gerdts 	rc = spdk_bdev_register(&bdev);
25623e7394afSMike Gerdts 	CU_ASSERT(rc == 0);
25633e7394afSMike Gerdts 	set_thread(1);
25643e7394afSMike Gerdts 	unreg_thread = NULL;
25653e7394afSMike Gerdts 	rc = spdk_bdev_unregister_by_name(bdev.name, bdev.module, _bdev_unregistered_wt,
25663e7394afSMike Gerdts 					  &unreg_thread);
2567a6e58cc4SMike Gerdts 	CU_ASSERT(rc == 0);
2568a6e58cc4SMike Gerdts 	poll_threads();
25693e7394afSMike Gerdts 	CU_ASSERT(unreg_thread == spdk_get_thread());
2570a6e58cc4SMike Gerdts }
2571a6e58cc4SMike Gerdts 
2572a6e58cc4SMike Gerdts static void
2573a6e58cc4SMike Gerdts wait_for_examine_cb(void *arg)
2574a6e58cc4SMike Gerdts {
2575a6e58cc4SMike Gerdts 	struct spdk_thread **thread = arg;
2576a6e58cc4SMike Gerdts 
2577a6e58cc4SMike Gerdts 	*thread = spdk_get_thread();
2578a6e58cc4SMike Gerdts }
2579a6e58cc4SMike Gerdts 
2580a6e58cc4SMike Gerdts static void
2581a6e58cc4SMike Gerdts spdk_bdev_examine_wt(void)
2582a6e58cc4SMike Gerdts {
2583a6e58cc4SMike Gerdts 	int rc;
2584a6e58cc4SMike Gerdts 	bool save_auto_examine = g_bdev_opts.bdev_auto_examine;
2585a6e58cc4SMike Gerdts 	struct spdk_thread *thread;
2586a6e58cc4SMike Gerdts 
2587a6e58cc4SMike Gerdts 	g_bdev_opts.bdev_auto_examine = false;
2588a6e58cc4SMike Gerdts 
2589a6e58cc4SMike Gerdts 	set_thread(0);
2590a6e58cc4SMike Gerdts 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2591a6e58cc4SMike Gerdts 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2592a6e58cc4SMike Gerdts 	set_thread(1);
2593a6e58cc4SMike Gerdts 
2594a6e58cc4SMike Gerdts 	/* Can examine only on the app thread */
2595a6e58cc4SMike Gerdts 	rc = spdk_bdev_examine("ut_bdev_wt");
25963e7394afSMike Gerdts 	CU_ASSERT(rc == -EINVAL);
2597a6e58cc4SMike Gerdts 	unregister_bdev(&g_bdev);
2598a6e58cc4SMike Gerdts 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2599a6e58cc4SMike Gerdts 
2600a6e58cc4SMike Gerdts 	/* Can wait for examine on app thread, callback called on app thread. */
2601a6e58cc4SMike Gerdts 	set_thread(0);
2602a6e58cc4SMike Gerdts 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2603a6e58cc4SMike Gerdts 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2604a6e58cc4SMike Gerdts 	thread = NULL;
2605a6e58cc4SMike Gerdts 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2606a6e58cc4SMike Gerdts 	CU_ASSERT(rc == 0);
2607a6e58cc4SMike Gerdts 	poll_threads();
2608a6e58cc4SMike Gerdts 	CU_ASSERT(thread == spdk_get_thread());
2609a6e58cc4SMike Gerdts 	unregister_bdev(&g_bdev);
2610a6e58cc4SMike Gerdts 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2611a6e58cc4SMike Gerdts 
2612a6e58cc4SMike Gerdts 	/* Can wait for examine on non-app thread, callback called on same thread. */
2613a6e58cc4SMike Gerdts 	set_thread(0);
2614a6e58cc4SMike Gerdts 	register_bdev(&g_bdev, "ut_bdev_wt", &g_io_device);
2615a6e58cc4SMike Gerdts 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") != NULL);
2616a6e58cc4SMike Gerdts 	thread = NULL;
2617a6e58cc4SMike Gerdts 	rc = spdk_bdev_wait_for_examine(wait_for_examine_cb, &thread);
2618a6e58cc4SMike Gerdts 	CU_ASSERT(rc == 0);
2619a6e58cc4SMike Gerdts 	poll_threads();
2620a6e58cc4SMike Gerdts 	CU_ASSERT(thread == spdk_get_thread());
2621a6e58cc4SMike Gerdts 	unregister_bdev(&g_bdev);
2622a6e58cc4SMike Gerdts 	CU_ASSERT(spdk_bdev_get_by_name("ut_bdev_wt") == NULL);
2623a6e58cc4SMike Gerdts 
2624a6e58cc4SMike Gerdts 	unregister_bdev(&g_bdev);
2625a6e58cc4SMike Gerdts 	g_bdev_opts.bdev_auto_examine = save_auto_examine;
2626a6e58cc4SMike Gerdts }
2627a6e58cc4SMike Gerdts 
26283522d43aSShuhei Matsumoto static void
26293522d43aSShuhei Matsumoto event_notify_and_close(void)
26303522d43aSShuhei Matsumoto {
26313522d43aSShuhei Matsumoto 	int resize_notify_count = 0;
26323522d43aSShuhei Matsumoto 	struct spdk_bdev_desc *desc = NULL;
26333522d43aSShuhei Matsumoto 	struct spdk_bdev *bdev;
26343522d43aSShuhei Matsumoto 	int rc;
26353522d43aSShuhei Matsumoto 
26363522d43aSShuhei Matsumoto 	setup_test();
26373522d43aSShuhei Matsumoto 	set_thread(0);
26383522d43aSShuhei Matsumoto 
26393522d43aSShuhei Matsumoto 	/* setup_test() automatically opens the bdev, but this test needs to do
26403522d43aSShuhei Matsumoto 	 * that in a different way. */
26413522d43aSShuhei Matsumoto 	spdk_bdev_close(g_desc);
26423522d43aSShuhei Matsumoto 	poll_threads();
26433522d43aSShuhei Matsumoto 
26443522d43aSShuhei Matsumoto 	set_thread(1);
26453522d43aSShuhei Matsumoto 
26463522d43aSShuhei Matsumoto 	rc = spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &resize_notify_count, &desc);
26473522d43aSShuhei Matsumoto 	CU_ASSERT(rc == 0);
26483522d43aSShuhei Matsumoto 	SPDK_CU_ASSERT_FATAL(desc != NULL);
26493522d43aSShuhei Matsumoto 
26503522d43aSShuhei Matsumoto 	bdev = spdk_bdev_desc_get_bdev(desc);
26513522d43aSShuhei Matsumoto 	SPDK_CU_ASSERT_FATAL(bdev != NULL);
26523522d43aSShuhei Matsumoto 
26533522d43aSShuhei Matsumoto 	/* Test the normal case where a resize event is notified. */
26543522d43aSShuhei Matsumoto 	set_thread(0);
26553522d43aSShuhei Matsumoto 
26563522d43aSShuhei Matsumoto 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 2);
26573522d43aSShuhei Matsumoto 	CU_ASSERT(rc == 0);
26583522d43aSShuhei Matsumoto 	CU_ASSERT(bdev->blockcnt == 1024 * 2);
26593522d43aSShuhei Matsumoto 	CU_ASSERT(desc->refs == 1);
26603522d43aSShuhei Matsumoto 	CU_ASSERT(resize_notify_count == 0);
26613522d43aSShuhei Matsumoto 
26623522d43aSShuhei Matsumoto 	poll_threads();
26633522d43aSShuhei Matsumoto 
26643522d43aSShuhei Matsumoto 	CU_ASSERT(desc->refs == 0);
26653522d43aSShuhei Matsumoto 	CU_ASSERT(resize_notify_count == 1);
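	/* Each event_notify message takes a desc reference when sent and releases it
	 * after the callback runs on the desc's thread, which is why refs returned
	 * to 0 only after polling.
	 */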
26663522d43aSShuhei Matsumoto 
26673522d43aSShuhei Matsumoto 	/* Test a more complex case: if the bdev is closed after two event_notify
26683522d43aSShuhei Matsumoto 	 * messages are sent, both messages are discarded and the desc is freed.
26693522d43aSShuhei Matsumoto 	 */
26703522d43aSShuhei Matsumoto 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 3);
26713522d43aSShuhei Matsumoto 	CU_ASSERT(rc == 0);
26723522d43aSShuhei Matsumoto 	CU_ASSERT(bdev->blockcnt == 1024 * 3);
26733522d43aSShuhei Matsumoto 	CU_ASSERT(desc->refs == 1);
26743522d43aSShuhei Matsumoto 	CU_ASSERT(resize_notify_count == 1);
26753522d43aSShuhei Matsumoto 
26763522d43aSShuhei Matsumoto 	rc = spdk_bdev_notify_blockcnt_change(bdev, 1024 * 4);
26773522d43aSShuhei Matsumoto 	CU_ASSERT(rc == 0);
26783522d43aSShuhei Matsumoto 	CU_ASSERT(bdev->blockcnt == 1024 * 4);
26793522d43aSShuhei Matsumoto 	CU_ASSERT(desc->refs == 2);
26803522d43aSShuhei Matsumoto 	CU_ASSERT(resize_notify_count == 1);
26813522d43aSShuhei Matsumoto 
26823522d43aSShuhei Matsumoto 	set_thread(1);
26833522d43aSShuhei Matsumoto 
26843522d43aSShuhei Matsumoto 	spdk_bdev_close(desc);
26853522d43aSShuhei Matsumoto 	CU_ASSERT(desc->closed == true);
26863522d43aSShuhei Matsumoto 	CU_ASSERT(desc->refs == 2);
26873522d43aSShuhei Matsumoto 	CU_ASSERT(resize_notify_count == 1);
26883522d43aSShuhei Matsumoto 
26893522d43aSShuhei Matsumoto 	poll_threads();
26903522d43aSShuhei Matsumoto 
26913522d43aSShuhei Matsumoto 	CU_ASSERT(resize_notify_count == 1);
26923522d43aSShuhei Matsumoto 
26933522d43aSShuhei Matsumoto 	set_thread(0);
26943522d43aSShuhei Matsumoto 
26953522d43aSShuhei Matsumoto 	/* Restore g_desc. Then, we can execute teardown_test(). */
26963522d43aSShuhei Matsumoto 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
26973522d43aSShuhei Matsumoto 	teardown_test();
26983522d43aSShuhei Matsumoto }
26993522d43aSShuhei Matsumoto 
270095df4e0dSShuhei Matsumoto /* There was a bug in which bdev_channel_poll_qos() called spdk_for_each_channel()
270195df4e0dSShuhei Matsumoto  * after spdk_io_device_unregister() had been called for a bdev.
270295df4e0dSShuhei Matsumoto  *
270395df4e0dSShuhei Matsumoto  * This occurred in the following sequence.
270495df4e0dSShuhei Matsumoto  * - There was a bdev and a channel for it.
270595df4e0dSShuhei Matsumoto  * - QoS was enabled and started.
270695df4e0dSShuhei Matsumoto  * - spdk_bdev_unregister() was called. However, there was an open descriptor.
270795df4e0dSShuhei Matsumoto  *   Hence, a remove notification was sent and unregistration was pending.
270895df4e0dSShuhei Matsumoto  * - Upon receiving the event notification, spdk_put_io_channel() and spdk_bdev_close() were
270995df4e0dSShuhei Matsumoto  *   called. In spdk_bdev_close(), the existing QoS was unbound and a message was sent
271095df4e0dSShuhei Matsumoto  *   to it, and then the pending spdk_io_device_unregister() was finally executed.
271195df4e0dSShuhei Matsumoto  * - If bdev_channel_poll_qos() was executed before the message was processed,
271295df4e0dSShuhei Matsumoto  *   bdev_channel_poll_qos() called spdk_bdev_for_each_channel() and hit an assert().
271395df4e0dSShuhei Matsumoto  *
271495df4e0dSShuhei Matsumoto  * The fix: in this case bdev_channel_poll_qos() returns immediately because QoS
271595df4e0dSShuhei Matsumoto  * is not enabled; bdev_qos_destroy() creates a new disabled QoS and swaps it with
271695df4e0dSShuhei Matsumoto  * the existing QoS.
271795df4e0dSShuhei Matsumoto  *
271895df4e0dSShuhei Matsumoto  * This test case was added to prevent regressions in the future.
271995df4e0dSShuhei Matsumoto  */
272095df4e0dSShuhei Matsumoto static void
272195df4e0dSShuhei Matsumoto unregister_and_qos_poller(void)
272295df4e0dSShuhei Matsumoto {
272395df4e0dSShuhei Matsumoto 	struct spdk_io_channel *io_ch;
272495df4e0dSShuhei Matsumoto 	struct spdk_bdev_channel *bdev_ch;
272595df4e0dSShuhei Matsumoto 	struct spdk_bdev_desc *desc = NULL;
272695df4e0dSShuhei Matsumoto 	struct spdk_bdev_qos *old_qos;
272795df4e0dSShuhei Matsumoto 	uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
272895df4e0dSShuhei Matsumoto 	bool remove_notify = false, done_unregister = false;
272995df4e0dSShuhei Matsumoto 	int status = -1, rc;
273095df4e0dSShuhei Matsumoto 
273195df4e0dSShuhei Matsumoto 	setup_test();
273295df4e0dSShuhei Matsumoto 	set_thread(0);
273395df4e0dSShuhei Matsumoto 
273495df4e0dSShuhei Matsumoto 	MOCK_SET(spdk_get_ticks, 10);
273595df4e0dSShuhei Matsumoto 
273695df4e0dSShuhei Matsumoto 	/* setup_test() automatically opens the bdev, but this test needs to do
273795df4e0dSShuhei Matsumoto 	 * that in a different way.
273895df4e0dSShuhei Matsumoto 	 */
273995df4e0dSShuhei Matsumoto 	spdk_bdev_close(g_desc);
274095df4e0dSShuhei Matsumoto 	poll_threads();
274195df4e0dSShuhei Matsumoto 
274295df4e0dSShuhei Matsumoto 	/* We want to get the remove event notification to check that unregistration
274395df4e0dSShuhei Matsumoto 	 * is deferred.
274495df4e0dSShuhei Matsumoto 	 */
274595df4e0dSShuhei Matsumoto 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, &remove_notify, &desc);
274695df4e0dSShuhei Matsumoto 	SPDK_CU_ASSERT_FATAL(desc != NULL);
274795df4e0dSShuhei Matsumoto 	CU_ASSERT(remove_notify == false);
274895df4e0dSShuhei Matsumoto 
274995df4e0dSShuhei Matsumoto 	io_ch = spdk_bdev_get_io_channel(desc);
275095df4e0dSShuhei Matsumoto 	SPDK_CU_ASSERT_FATAL(io_ch != NULL);
275195df4e0dSShuhei Matsumoto 	bdev_ch = spdk_io_channel_get_ctx(io_ch);
275295df4e0dSShuhei Matsumoto 
275395df4e0dSShuhei Matsumoto 	limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
275495df4e0dSShuhei Matsumoto 	limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
275595df4e0dSShuhei Matsumoto 	limits[SPDK_BDEV_QOS_R_BPS_RATE_LIMIT] = 0;
275695df4e0dSShuhei Matsumoto 	limits[SPDK_BDEV_QOS_W_BPS_RATE_LIMIT] = 0;
275795df4e0dSShuhei Matsumoto 	spdk_bdev_set_qos_rate_limits(&g_bdev.bdev, limits, qos_dynamic_enable_done, &status);
275895df4e0dSShuhei Matsumoto 	poll_threads();
275995df4e0dSShuhei Matsumoto 	CU_ASSERT(status == 0);
276095df4e0dSShuhei Matsumoto 	CU_ASSERT((bdev_ch->flags & BDEV_CH_QOS_ENABLED) != 0);
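	/* QoS is now enabled: bdev.internal.qos exists and its poller is bound to
	 * this thread (the thread of the only open channel).
	 */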
276195df4e0dSShuhei Matsumoto 
276295df4e0dSShuhei Matsumoto 	old_qos = g_bdev.bdev.internal.qos;
276395df4e0dSShuhei Matsumoto 	CU_ASSERT(old_qos != NULL);
276495df4e0dSShuhei Matsumoto 
276595df4e0dSShuhei Matsumoto 	spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done_unregister);
276695df4e0dSShuhei Matsumoto 	CU_ASSERT(done_unregister == false);
276795df4e0dSShuhei Matsumoto 	CU_ASSERT(remove_notify == false);
276895df4e0dSShuhei Matsumoto 
276995df4e0dSShuhei Matsumoto 	poll_threads();
277095df4e0dSShuhei Matsumoto 	CU_ASSERT(done_unregister == false);
277195df4e0dSShuhei Matsumoto 	CU_ASSERT(remove_notify == true);
277295df4e0dSShuhei Matsumoto 
277395df4e0dSShuhei Matsumoto 	spdk_put_io_channel(io_ch);
277495df4e0dSShuhei Matsumoto 	spdk_bdev_close(desc);
277595df4e0dSShuhei Matsumoto 
277695df4e0dSShuhei Matsumoto 	CU_ASSERT(g_bdev.bdev.internal.qos != NULL);
277795df4e0dSShuhei Matsumoto 	CU_ASSERT(g_bdev.bdev.internal.qos->thread == NULL);
277895df4e0dSShuhei Matsumoto 	CU_ASSERT(old_qos != g_bdev.bdev.internal.qos);
277995df4e0dSShuhei Matsumoto 
278095df4e0dSShuhei Matsumoto 	/* bdev_channel_poll_qos() has a chance to be executed in this small window. */
278195df4e0dSShuhei Matsumoto 	spdk_delay_us(SPDK_BDEV_QOS_TIMESLICE_IN_USEC);
278295df4e0dSShuhei Matsumoto 
278395df4e0dSShuhei Matsumoto 	rc = bdev_channel_poll_qos(&g_bdev.bdev);
278495df4e0dSShuhei Matsumoto 	CU_ASSERT(rc == SPDK_POLLER_IDLE);
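	/* bdev_qos_destroy() swapped in a fresh, disabled QoS object, so the poller
	 * returns idle instead of iterating channels of the now-unregistered
	 * io_device.
	 */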
278595df4e0dSShuhei Matsumoto 
278695df4e0dSShuhei Matsumoto 	poll_threads();
278795df4e0dSShuhei Matsumoto 
278895df4e0dSShuhei Matsumoto 	CU_ASSERT(done_unregister == true);
278995df4e0dSShuhei Matsumoto 
279095df4e0dSShuhei Matsumoto 	/* Restore the original g_bdev so that we can use teardown_test(). */
279195df4e0dSShuhei Matsumoto 	set_thread(0);
279295df4e0dSShuhei Matsumoto 	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
279395df4e0dSShuhei Matsumoto 	spdk_bdev_open_ext("ut_bdev", true, _bdev_event_cb, NULL, &g_desc);
279495df4e0dSShuhei Matsumoto 	teardown_test();
279595df4e0dSShuhei Matsumoto }
279695df4e0dSShuhei Matsumoto 
2797d47eb51cSJinlong Chen /**
2798d47eb51cSJinlong Chen  * There was a race between reset start and complete:
2799d47eb51cSJinlong Chen  *
2800d47eb51cSJinlong Chen  * 1. reset_1 is completing. It clears bdev->internal.reset_in_progress and sends
2801d47eb51cSJinlong Chen  *    unfreeze_channel messages to remove queued resets of all channels.
2802d47eb51cSJinlong Chen  * 2. reset_2 is starting. As bdev->internal.reset_in_progress has been cleared, it
2803d47eb51cSJinlong Chen  *    is inserted to queued_resets list and starts to freeze channels.
2804d47eb51cSJinlong Chen  * 3. reset_1's unfreeze_channel message removes reset_2 from queued_resets list.
2805d47eb51cSJinlong Chen  * 4. reset_2 finishes freezing channels, but the corresponding bdev_io is gone,
2806d47eb51cSJinlong Chen  *    resulting in a segmentation fault.
2807d47eb51cSJinlong Chen  *
2808d47eb51cSJinlong Chen  * To fix this,
2809d47eb51cSJinlong Chen  * 1. Do not queue the reset that is submitted to the underlying device.
2810d47eb51cSJinlong Chen  * 2. Queue all other resets in a per-bdev list, so all of them can be completed
2811d47eb51cSJinlong Chen  *    at once.
2812d47eb51cSJinlong Chen  */
2813d47eb51cSJinlong Chen static void
2814d47eb51cSJinlong Chen reset_start_complete_race(void)
2815d47eb51cSJinlong Chen {
2816d47eb51cSJinlong Chen 	struct spdk_io_channel *io_ch;
2817d47eb51cSJinlong Chen 	bool done_reset_1 = false, done_reset_2 = false;
2818d47eb51cSJinlong Chen 	uint32_t num_completed;
2819d47eb51cSJinlong Chen 	int rc;
2820d47eb51cSJinlong Chen 
2821d47eb51cSJinlong Chen 	setup_test();
2822d47eb51cSJinlong Chen 	set_thread(0);
2823d47eb51cSJinlong Chen 
2824d47eb51cSJinlong Chen 	io_ch = spdk_bdev_get_io_channel(g_desc);
2825d47eb51cSJinlong Chen 	SPDK_CU_ASSERT_FATAL(io_ch != NULL);
2826d47eb51cSJinlong Chen 
2827d47eb51cSJinlong Chen 	CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
2828d47eb51cSJinlong Chen 
2829d47eb51cSJinlong Chen 	/**
2830d47eb51cSJinlong Chen 	 * Submit reset_1.
2831d47eb51cSJinlong Chen 	 */
2832d47eb51cSJinlong Chen 	rc = spdk_bdev_reset(g_desc, io_ch, reset_done, &done_reset_1);
2833d47eb51cSJinlong Chen 	CU_ASSERT(rc == 0);
2834d47eb51cSJinlong Chen 
2835d47eb51cSJinlong Chen 	/**
2836d47eb51cSJinlong Chen 	 * Poll threads so that reset_1 completes freezing channels and gets submitted to
2837d47eb51cSJinlong Chen 	 *  the underlying device.
2838d47eb51cSJinlong Chen 	 */
2839d47eb51cSJinlong Chen 	poll_threads();
2840d47eb51cSJinlong Chen 
2841d47eb51cSJinlong Chen 	/**
2842d47eb51cSJinlong Chen 	 * Complete reset_1. This will start the channel-unfreezing stage of reset_1, but
2843d47eb51cSJinlong Chen 	 *  not complete it until the next poll_threads().
2844d47eb51cSJinlong Chen 	 */
2845d47eb51cSJinlong Chen 	num_completed = stub_complete_io(g_bdev.io_target, 0);
2846d47eb51cSJinlong Chen 	CU_ASSERT(num_completed == 1);
2847d47eb51cSJinlong Chen 
2848d47eb51cSJinlong Chen 	/**
2849d47eb51cSJinlong Chen 	 * Submit reset_2. It should be queued because reset_1 has not been completed yet.
2850d47eb51cSJinlong Chen 	 */
2851d47eb51cSJinlong Chen 	rc = spdk_bdev_reset(g_desc, io_ch, reset_done, &done_reset_2);
2852d47eb51cSJinlong Chen 	CU_ASSERT(rc == 0);
2853d47eb51cSJinlong Chen 
2854d47eb51cSJinlong Chen 	/**
2855d47eb51cSJinlong Chen 	 * Poll threads. reset_1 completes unfreezing channels, then completes queued reset_2,
2856d47eb51cSJinlong Chen 	 *  and finally completes itself.
2857d47eb51cSJinlong Chen 	 */
2858d47eb51cSJinlong Chen 	poll_threads();
2859d47eb51cSJinlong Chen 	CU_ASSERT(done_reset_1 == true);
2860d47eb51cSJinlong Chen 	CU_ASSERT(done_reset_2 == true);
2861d47eb51cSJinlong Chen 
2862d47eb51cSJinlong Chen 	spdk_put_io_channel(io_ch);
2863d47eb51cSJinlong Chen 	teardown_test();
2864d47eb51cSJinlong Chen }
2865d47eb51cSJinlong Chen 
2866674c7097SJim Harris int
2867674c7097SJim Harris main(int argc, char **argv)
2868674c7097SJim Harris {
2869674c7097SJim Harris 	CU_pSuite	suite = NULL;
2870a6e58cc4SMike Gerdts 	CU_pSuite	suite_wt = NULL;
2871674c7097SJim Harris 	unsigned int	num_failures;
2872674c7097SJim Harris 
287378b696bcSVitaliy Mysak 	CU_initialize_registry();
2874674c7097SJim Harris 
2875674c7097SJim Harris 	suite = CU_add_suite("bdev", NULL, NULL);
2876a6e58cc4SMike Gerdts 	suite_wt = CU_add_suite("bdev_wrong_thread", wrong_thread_setup, wrong_thread_teardown);
2877674c7097SJim Harris 
2878dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, basic);
2879dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, unregister_and_close);
2880cf64422aSJim Harris 	CU_ADD_TEST(suite, unregister_and_close_different_threads);
2881dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, basic_qos);
2882dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, put_channel_during_reset);
2883dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, aborted_reset);
2884dfc98943SKrzysztof Karas 	CU_ADD_TEST(suite, aborted_reset_no_outstanding_io);
2885dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, io_during_reset);
2886dfc98943SKrzysztof Karas 	CU_ADD_TEST(suite, reset_completions);
2887dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, io_during_qos_queue);
2888dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, io_during_qos_reset);
2889dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, enomem);
2890dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, enomem_multi_bdev);
28917bcd316dSGangCao 	CU_ADD_TEST(suite, enomem_multi_bdev_unregister);
2892dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, enomem_multi_io_target);
2893*52a41348SJinlong Chen 	CU_ADD_TEST(suite, enomem_retry_during_abort);
2894dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, qos_dynamic_enable);
2895dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, bdev_histograms_mt);
2896dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, bdev_set_io_timeout_mt);
2897dcf0ca15SVitaliy Mysak 	CU_ADD_TEST(suite, lock_lba_range_then_submit_io);
2898494eb6e5SShuhei Matsumoto 	CU_ADD_TEST(suite, unregister_during_reset);
2899a6e58cc4SMike Gerdts 	CU_ADD_TEST(suite_wt, spdk_bdev_register_wt);
2900a6e58cc4SMike Gerdts 	CU_ADD_TEST(suite_wt, spdk_bdev_examine_wt);
29013522d43aSShuhei Matsumoto 	CU_ADD_TEST(suite, event_notify_and_close);
290295df4e0dSShuhei Matsumoto 	CU_ADD_TEST(suite, unregister_and_qos_poller);
2903d47eb51cSJinlong Chen 	CU_ADD_TEST(suite, reset_start_complete_race);
2904674c7097SJim Harris 
2905ea941caeSKonrad Sztyber 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2906674c7097SJim Harris 	CU_cleanup_registry();
2907674c7097SJim Harris 	return num_failures;
2908674c7097SJim Harris }
2909