/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "lib/test_env.c"
#include "unit/lib/json_mock.c"
#include "lib/ut_multithread.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail gets to 0, the submit_request function will return ENOMEM.
	 *  Most tests do not want ENOMEM to occur, so by default set this to a
	 *  big value that won't get hit.  The ENOMEM tests can then override this
	 *  value to something much smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

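/*
 * Stub submit_request handler.  A reset immediately fails all I/O outstanding
 * on the channel.  Any other I/O is queued on the channel if capacity is
 * available, or completed with NOMEM status if it is not.
 */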
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

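/*
 * Complete up to num_to_complete outstanding I/O on the channel with SUCCESS
 * status, oldest first.  Passing 0 completes all outstanding I/O.  Returns
 * the number of I/O actually completed.
 */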
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
	memset(ut_bdev, 0, sizeof(*ut_bdev));
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

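/*
 * Common test setup: allocate the test threads, initialize the bdev layer,
 * register the stub io_device and bdev, and open a descriptor to it.
 */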
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

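/*
 * Common test teardown: close the descriptor and tear everything down in
 * reverse order of setup_test().
 */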
static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}

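/*
 * Verify that spdk_bdev_get_io_channel() fails cleanly when either the bdev's
 * get_io_channel callback or the io_device channel creation fails, and
 * succeeds once both can complete.
 */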
static void
basic(void)
{
	setup_test();

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static int
poller_run_times_done(void *ctx)
{
	int	*poller_run_times = ctx;

	(*poller_run_times)++;

	return -1;
}

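/*
 * Verify poller registration and execution: a poller with no wait time runs
 * on the next poll, and a poller with a period only runs once that much
 * simulated time has elapsed.
 */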
static void
basic_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;
	int			poller_run_times = 0;

	setup_test();

	set_thread(0);
	reset_time();
	/* Register a poller with no wait time and verify that it runs */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with a 1000us period and verify a single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	reset_time();
	/* Register a poller with a 1000us period and verify multiple executions */
	poller = spdk_poller_register(poller_run_times_done, &poller_run_times, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run_times == 0);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run_times == 1);

	poller_run_times = 0;
	increment_time(2000);
	poll_threads();
	CU_ASSERT(poller_run_times == 2);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

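/*
 * Verify that putting the I/O channel while a reset is still pending does not
 * crash or leak - the deferred reset messages must tolerate the channel going
 * away underneath them.
 */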
static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

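/*
 * Verify that a reset queued behind another reset is failed (aborted) when
 * its channel is destroyed, without disturbing the reset that was actually
 * submitted to the bdev module.
 */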
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

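/*
 * Verify that I/O submitted while a reset is in progress is failed by the
 * bdev layer, and that I/O submitted with no reset pending completes
 * normally.
 */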
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress, so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset.
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0.  We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

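/*
 * Verify basic QoS channel lifetime: the QoS bdev channel is created when a
 * rate limit is set, I/O is serviced on the QoS thread, and the channel is
 * destroyed (and can be recreated) as I/O channels come and go.
 */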
static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on the channel (QoS not enabled)
	 *  and verify it completes successfully.
	 */
	set_thread(0);
	g_get_io_channel = false;
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] == NULL);
	g_get_io_channel = true;
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(1);
	bdev = &g_bdev.bdev;
	bdev->ios_per_sec = 2000;
	g_get_io_channel = false;
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] == NULL);
	qos_bdev_ch = bdev->qos_channel;
	CU_ASSERT(qos_bdev_ch == NULL);
	g_get_io_channel = true;
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);
	CU_ASSERT(g_ut_threads[1].thread == bdev->qos_thread);

	/*
	 * Now send one I/O on the first channel.
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 1);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * The I/O is serviced on thread 1 via the QoS thread.
	 */
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 1);

	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * The QoS thread is thread 1.  Put the I/O channel created on thread 1
	 * first to trigger an async destruction of the QoS bdev channel.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);

	/*
	 * Handle the messages on thread 1 first so that the QoS bdev
	 * channel destroy message sent from thread 0 will be processed
	 * there.
	 */
	poll_thread(1);
	poll_thread(0);

	/*
	 * Create a new I/O channel while the async destruction of the QoS
	 * bdev channel is ongoing.  The expected result is that the QoS bdev
	 * channel will be properly set up again.
	 */
	set_thread(2);
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);

	poll_threads();

	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);
	CU_ASSERT(g_ut_threads[2].thread == bdev->qos_thread);

	/*
	 * Destroy the last I/O channel so that the QoS bdev channel
	 * will be destroyed.
	 */
	set_thread(2);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	teardown_test();
}

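/*
 * Verify that I/O submitted on multiple channels with QoS enabled is routed
 * through the QoS channel and completes successfully when under the rate
 * limit.
 */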
static void
io_during_qos(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (QoS not enabled)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(2);
	bdev = &g_bdev.bdev;
	/*
	 * 10 I/O allowed per millisecond.
	 */
	bdev->ios_per_sec = 10000;
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);

	/*
	 * Now send some I/O on different channels with QoS enabled.
	 */
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 2);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * The I/O is serviced on thread 2 via the QoS thread.
	 */
	set_thread(2);
	stub_complete_io(g_bdev.io_target, 2);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(2);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	teardown_test();
}

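/*
 * Verify that I/O exceeding the QoS rate limit is queued on the QoS channel
 * and sent down later by the periodic QoS poller.
 */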
static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();
	reset_time();

	/*
	 * First test normal case - submit an I/O on each of two channels (QoS not enabled)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(2);
	bdev = bdev_ch[0]->bdev;
	/*
	 * Only 1 I/O allowed per millisecond.  Additional I/O will be queued.
	 */
	bdev->ios_per_sec = 1000;
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
	qos_bdev_ch = bdev->qos_channel;
	SPDK_CU_ASSERT_FATAL(qos_bdev_ch != NULL);
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);

	/*
	 * Now send some I/O on different channels with QoS enabled.
	 */
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * Poll the QoS thread to send the allowed I/O down.
	 */
	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 1);
	CU_ASSERT(bdev_io_tailq_cnt(&qos_bdev_ch->qos_io) == 1);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Advance the time and poll the QoS thread to run the periodic poller,
	 *  which sends the queued I/O down.
	 */
	increment_time(1000);
	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 2);
	CU_ASSERT(bdev_io_tailq_cnt(&qos_bdev_ch->qos_io) == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * The I/O is handled on thread 2, the QoS thread.
	 */
	set_thread(2);
	stub_complete_io(g_bdev.io_target, 0);
	spdk_put_io_channel(io_ch[0]);
	spdk_put_io_channel(io_ch[1]);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

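/*
 * Verify NOMEM handling: I/O rejected by the module is queued on the
 * nomem_io list and only retried once completions bring io_outstanding down
 * to nomem_threshold, and a reset flushes everything.
 */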
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));
	first_io = TAILQ_FIRST(&module_ch->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&module_ch->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);
	CU_ASSERT(module_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	CU_ASSERT(rc == 0);
	poll_threads();
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == 0);
	CU_ASSERT(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

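/*
 * Verify NOMEM handling when two bdevs share one io_target: I/O queued on the
 * shared module channel by one bdev is retried when the other bdev's I/O
 * completes.
 */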
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register a second bdev with the same io_target. */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(module_ch == second_bdev_ch->module_ch);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Now submit I/O through the second bdev.  This should fail with ENOMEM
	 *  and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));

	/* Complete first bdev's I/O.  This should retry sending second bdev's nomem_io. */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&module_ch->nomem_io));
	CU_ASSERT(module_ch->io_outstanding == 1);

	/* Now complete our retried I/O. */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	free(second_bdev);
	poll_threads();
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_poller", basic_poller) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos", io_during_qos) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}