/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk_cunit.h"

#include "lib/test_env.c"
#include "lib/ut_multithread.c"

/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
#undef SPDK_CONFIG_VTUNE

#include "bdev/bdev.c"

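/* Number of simulated SPDK threads created by the ut_multithread framework. */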
#define BDEV_UT_NUM_THREADS 3

DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
		int *sc, int *sk, int *asc, int *ascq));

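/*
 * Test bdev: wraps an spdk_bdev plus the io_device pointer used as its I/O
 * target, so stub_get_io_channel() can look up the per-channel context.
 */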
struct ut_bdev {
	struct spdk_bdev	bdev;
	void			*io_target;
};

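/*
 * Per-channel context for the stub backend: I/O are held on outstanding_io
 * until a test explicitly completes them with stub_complete_io().
 */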
struct ut_bdev_channel {
	TAILQ_HEAD(, spdk_bdev_io)	outstanding_io;
	uint32_t			outstanding_cnt;
	uint32_t			avail_cnt;
};

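/*
 * Shared test state.  g_get_io_channel and g_create_ch let individual tests
 * force failures in the channel creation path.
 */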
int g_io_device;
struct ut_bdev g_bdev;
struct spdk_bdev_desc *g_desc;
bool g_teardown_done = false;
bool g_get_io_channel = true;
bool g_create_ch = true;

static int
stub_create_ch(void *io_device, void *ctx_buf)
{
	struct ut_bdev_channel *ch = ctx_buf;

	if (g_create_ch == false) {
		return -1;
	}

	TAILQ_INIT(&ch->outstanding_io);
	ch->outstanding_cnt = 0;
	/*
	 * When avail_cnt gets to 0, stub_submit_request() will complete further
	 *  I/O with SPDK_BDEV_IO_STATUS_NOMEM.  Most tests do not want ENOMEM
	 *  to occur, so by default set this to a big value that won't get hit.
	 *  The ENOMEM tests can then override this value to something much
	 *  smaller to induce ENOMEM conditions.
	 */
	ch->avail_cnt = 2048;
	return 0;
}

static void
stub_destroy_ch(void *io_device, void *ctx_buf)
{
}

static struct spdk_io_channel *
stub_get_io_channel(void *ctx)
{
	struct ut_bdev *ut_bdev = ctx;

	if (g_get_io_channel == true) {
		return spdk_get_io_channel(ut_bdev->io_target);
	} else {
		return NULL;
	}
}

static int
stub_destruct(void *ctx)
{
	return 0;
}

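/*
 * Queue a submitted I/O on the channel until the test completes it.  A reset
 * first fails all I/O already outstanding on this channel; if avail_cnt is
 * exhausted, the I/O is immediately completed with NOMEM status instead.
 */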
static void
stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
{
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
		struct spdk_bdev_io *io;

		while (!TAILQ_EMPTY(&ch->outstanding_io)) {
			io = TAILQ_FIRST(&ch->outstanding_io);
			TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
			ch->outstanding_cnt--;
			spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
			ch->avail_cnt++;
		}
	}

	if (ch->avail_cnt > 0) {
		TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
		ch->outstanding_cnt++;
		ch->avail_cnt--;
	} else {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
	}
}

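/*
 * Complete up to num_to_complete outstanding I/O on the given io_target with
 * SUCCESS status (0 means complete everything) and return how many were
 * actually completed.
 */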
static uint32_t
stub_complete_io(void *io_target, uint32_t num_to_complete)
{
	struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
	struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct spdk_bdev_io *io;
	bool complete_all = (num_to_complete == 0);
	uint32_t num_completed = 0;

	while (complete_all || num_completed < num_to_complete) {
		if (TAILQ_EMPTY(&ch->outstanding_io)) {
			break;
		}
		io = TAILQ_FIRST(&ch->outstanding_io);
		TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
		ch->outstanding_cnt--;
		spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
		ch->avail_cnt++;
		num_completed++;
	}

	spdk_put_io_channel(_ch);
	return num_completed;
}

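/* Minimal function table - just channel management, destruct and I/O submission. */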
static struct spdk_bdev_fn_table fn_table = {
	.get_io_channel =	stub_get_io_channel,
	.destruct =		stub_destruct,
	.submit_request =	stub_submit_request,
};

static int
module_init(void)
{
	return 0;
}

static void
module_fini(void)
{
}

struct spdk_bdev_module bdev_ut_if = {
	.name = "bdev_ut",
	.module_init = module_init,
	.module_fini = module_fini,
};

SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)

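/* Register a 1024-block test bdev (4096-byte blocks) backed by the given io_target. */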
static void
register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
{
	memset(ut_bdev, 0, sizeof(*ut_bdev));

	ut_bdev->io_target = io_target;
	ut_bdev->bdev.ctxt = ut_bdev;
	ut_bdev->bdev.name = name;
	ut_bdev->bdev.fn_table = &fn_table;
	ut_bdev->bdev.module = &bdev_ut_if;
	ut_bdev->bdev.blocklen = 4096;
	ut_bdev->bdev.blockcnt = 1024;

	spdk_bdev_register(&ut_bdev->bdev);
}

static void
unregister_bdev(struct ut_bdev *ut_bdev)
{
	/* Handle any deferred messages. */
	poll_threads();
	spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
	memset(ut_bdev, 0, sizeof(*ut_bdev));
}

static void
bdev_init_cb(void *done, int rc)
{
	CU_ASSERT(rc == 0);
	*(bool *)done = true;
}

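/*
 * Common setup: spawn the simulated threads, initialize the bdev layer,
 * register the stub io_device and one test bdev, and open a descriptor to it.
 */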
static void
setup_test(void)
{
	bool done = false;

	allocate_threads(BDEV_UT_NUM_THREADS);
	spdk_bdev_initialize(bdev_init_cb, &done);
	spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
				sizeof(struct ut_bdev_channel));
	register_bdev(&g_bdev, "ut_bdev", &g_io_device);
	spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
}

static void
finish_cb(void *cb_arg)
{
	g_teardown_done = true;
}

static void
teardown_test(void)
{
	g_teardown_done = false;
	spdk_bdev_close(g_desc);
	g_desc = NULL;
	unregister_bdev(&g_bdev);
	spdk_io_device_unregister(&g_io_device, NULL);
	spdk_bdev_finish(finish_cb, NULL);
	poll_threads();
	CU_ASSERT(g_teardown_done == true);
	g_teardown_done = false;
	free_threads();
}

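/* Count the bdev_io entries currently on a tailq. */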
static uint32_t
bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
{
	struct spdk_bdev_io *io;
	uint32_t cnt = 0;

	TAILQ_FOREACH(io, tailq, link) {
		cnt++;
	}

	return cnt;
}

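/*
 * Exercise spdk_bdev_get_io_channel(): both failure modes (module returns no
 * channel, channel create callback fails) and the normal success path.
 */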
static void
basic(void)
{
	setup_test();

	set_thread(0);

	g_get_io_channel = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = false;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch == NULL);

	g_get_io_channel = true;
	g_create_ch = true;
	g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(g_ut_threads[0].ch != NULL);
	spdk_put_io_channel(g_ut_threads[0].ch);

	teardown_test();
}

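/* Poller callbacks for basic_poller() - record that (and how often) the poller ran. */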
static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static int
poller_run_times_done(void *ctx)
{
	int	*poller_run_times = ctx;

	(*poller_run_times)++;

	return -1;
}

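/*
 * Verify poller semantics: a zero-period poller runs on every poll, while a
 * 1000us poller only runs once the simulated time has advanced past its period.
 */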
static void
basic_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;
	int			poller_run_times = 0;

	setup_test();

	set_thread(0);
	reset_time();
	/* Register a poller with a zero wait time and verify that it executes */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	reset_time();
	/* Register a poller with 1000us wait time and test multiple execution */
	poller = spdk_poller_register(poller_run_times_done, &poller_run_times, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run_times == 0);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run_times == 1);

	poller_run_times = 0;
	increment_time(2000);
	poll_threads();
	CU_ASSERT(poller_run_times == 2);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	teardown_test();
}

static void
reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	bool *done = cb_arg;

	CU_ASSERT(success == true);
	*done = true;
	spdk_bdev_free_io(bdev_io);
}

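/*
 * Put the I/O channel while a reset is still outstanding and make sure
 * teardown still completes cleanly.
 */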
static void
put_channel_during_reset(void)
{
	struct spdk_io_channel *io_ch;
	bool done = false;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch != NULL);

	/*
	 * Start a reset, but then put the I/O channel before
	 *  the deferred messages for the reset get a chance to
	 *  execute.
	 */
	spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
	spdk_put_io_channel(io_ch);
	poll_threads();
	stub_complete_io(g_bdev.io_target, 0);

	teardown_test();
}

static void
aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

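/*
 * Submit a reset on each of two channels.  The second reset gets queued behind
 * the first; destroying its channel must fail it without completing the
 * original reset.
 */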
static void
aborted_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	enum spdk_bdev_io_status status1, status2;

	setup_test();

	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] != NULL);
	spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * First reset has been submitted on ch0.  Now submit a second
	 *  reset on ch1 which will get queued since there is already a
	 *  reset in progress.
	 */
	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] != NULL);
	spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
	poll_threads();
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now destroy ch1.  This will abort the queued reset.  Check that
	 *  the second reset was completed with failed status.  Also check
	 *  that bdev->reset_in_progress != NULL, since the original reset
	 *  has not been completed yet.  This ensures that the bdev code is
	 *  correctly noticing that the failed reset is *not* the one that
	 *  had been submitted to the bdev module.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();
	CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_bdev.bdev.reset_in_progress != NULL);

	/*
	 * Now complete the first reset, verify that it completed with SUCCESS
	 *  status and that bdev->reset_in_progress is also set back to NULL.
	 */
	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	stub_complete_io(g_bdev.io_target, 0);
	poll_threads();
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_bdev.bdev.reset_in_progress == NULL);

	teardown_test();
}

static void
io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

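/*
 * Verify that I/O submitted while a reset is in progress is failed by the
 * bdev layer rather than being forwarded to the bdev module.
 */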
static void
io_during_reset(void)
{
	struct spdk_io_channel *io_ch[2];
	struct spdk_bdev_channel *bdev_ch[2];
	enum spdk_bdev_io_status status0, status1, status_reset;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (with no resets)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	CU_ASSERT(bdev_ch[0]->flags == 0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * Now submit a reset, and leave it pending while we submit I/O on two different
	 *  channels.  These I/O should be failed by the bdev layer since the reset is in
	 *  progress.
	 */
	set_thread(0);
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
	CU_ASSERT(rc == 0);

	CU_ASSERT(bdev_ch[0]->flags == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);
	poll_threads();
	CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
	CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);

	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * A reset is in progress so these read I/O should complete with failure.  Note that we
	 *  need to poll_threads() since I/O completed inline have their completion deferred.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);

	/*
	 * Complete the reset
	 */
	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);

	/*
	 * Only poll thread 0. We should not get a completion.
	 */
	poll_thread(0);
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Poll both thread 0 and 1 so the messages can propagate and we
	 * get a completion.
	 */
	poll_threads();
	CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	poll_threads();

	teardown_test();
}

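/*
 * Enable rate limiting (ios_per_sec) and verify that the QoS bdev channel is
 * created on a dedicated QoS thread, is rebuilt if a new I/O channel appears
 * while its async destruction is in flight, and is destroyed along with the
 * last I/O channel.
 */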
static void
basic_qos(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on the channel (QoS not enabled)
	 *  and verify it completes successfully.
	 */
	set_thread(0);
	g_get_io_channel = false;
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[0] == NULL);
	g_get_io_channel = true;
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(1);
	bdev = &g_bdev.bdev;
	bdev->ios_per_sec = 2000;
	g_get_io_channel = false;
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	CU_ASSERT(io_ch[1] == NULL);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	qos_bdev_ch = bdev->qos_channel;
	CU_ASSERT(qos_bdev_ch == NULL);
	g_get_io_channel = true;
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	qos_bdev_ch = bdev->qos_channel;
	CU_ASSERT(bdev->qos_channel->flags == BDEV_CH_QOS_ENABLED);
	CU_ASSERT(qos_bdev_ch != NULL);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);
	CU_ASSERT(g_ut_threads[1].thread == bdev->qos_thread);

	/*
	 * Now send one I/O on the first channel
	 */
	set_thread(0);
	status = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 1);
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * The I/O is processed on thread 1, where the QoS thread runs
	 */
	set_thread(1);
	stub_complete_io(g_bdev.io_target, 1);

	poll_threads();
	CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/*
	 * QoS thread is on thread 1. Put I/O channel on thread 1 first
	 * to trigger an async destruction of QoS bdev channel.
	 */
	set_thread(1);
	spdk_put_io_channel(io_ch[0]);
	set_thread(0);
	spdk_put_io_channel(io_ch[1]);

	/*
	 * Handle the messages on thread 1 first so that the QoS bdev
	 * channel destroy message from thread 0 handling will be active
	 * there.
	 */
	poll_thread(1);
	poll_thread(0);

	/*
	 * Create a new I/O channel while the async destruction of the QoS
	 * bdev channel is ongoing.  The expected result is that the QoS bdev
	 * channel will be properly set up again.
	 */
	set_thread(2);
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);

	poll_threads();

	qos_bdev_ch = bdev->qos_channel;
	CU_ASSERT(qos_bdev_ch->flags == BDEV_CH_QOS_ENABLED);
	CU_ASSERT(qos_bdev_ch != NULL);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);
	CU_ASSERT(g_ut_threads[1].thread == bdev->qos_thread);

	/*
	 * Destroy the last I/O channel so that the QoS bdev channel
	 * will be destroyed.
	 */
	set_thread(2);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	teardown_test();
}

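/*
 * With QoS enabled and a generous rate limit, I/O submitted on any thread is
 * funneled through the QoS thread and still completes successfully.
 */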
static void
io_during_qos(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();

	/*
	 * First test normal case - submit an I/O on each of two channels (QoS not enabled)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(2);
	bdev = &g_bdev.bdev;
	/*
	 * 10 IOs allowed per millisecond
	 */
	bdev->ios_per_sec = 10000;
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
	qos_bdev_ch = bdev->qos_channel;
	CU_ASSERT(bdev->qos_channel->flags == BDEV_CH_QOS_ENABLED);
	CU_ASSERT(qos_bdev_ch != NULL);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);

	/*
	 * Now send some I/O on different channels with QoS enabled
	 */
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 2);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * These I/O are processed on thread 2, where the QoS thread runs
	 */
	set_thread(2);
	stub_complete_io(g_bdev.io_target, 2);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);
	spdk_put_io_channel(io_ch[0]);
	set_thread(1);
	spdk_put_io_channel(io_ch[1]);
	set_thread(2);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	teardown_test();
}

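/*
 * With a limit of 1 I/O per millisecond, a second I/O must wait on the qos_io
 * queue until the periodic QoS poller fires for the next millisecond.
 */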
static void
io_during_qos_queue(void)
{
	struct spdk_io_channel *io_ch[3];
	struct spdk_bdev_channel *bdev_ch[3], *qos_bdev_ch;
	struct spdk_bdev *bdev;
	enum spdk_bdev_io_status status0, status1;
	struct spdk_bdev_module_channel *module_ch;
	int rc;

	setup_test();
	reset_time();

	/*
	 * First test normal case - submit an I/O on each of two channels (QoS not enabled)
	 *  and verify they complete successfully.
	 */
	set_thread(0);
	io_ch[0] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[0]->flags == 0);

	set_thread(1);
	io_ch[1] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bdev_ch[1]->flags == 0);

	poll_threads();
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	set_thread(0);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(1);
	stub_complete_io(g_bdev.io_target, 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	poll_threads();

	set_thread(2);
	bdev = bdev_ch[0]->bdev;
	/*
	 * Only 1 IO allowed per millisecond. More IOs will be queued.
	 */
	bdev->ios_per_sec = 1000;
	io_ch[2] = spdk_bdev_get_io_channel(g_desc);
	bdev_ch[2] = spdk_io_channel_get_ctx(io_ch[2]);
	qos_bdev_ch = bdev->qos_channel;
	CU_ASSERT(bdev->qos_channel->flags == BDEV_CH_QOS_ENABLED);
	CU_ASSERT(qos_bdev_ch != NULL);
	module_ch = qos_bdev_ch->module_ch;
	CU_ASSERT(module_ch->io_outstanding == 0);

	/*
	 * Now send some I/O on different channels with QoS enabled
	 */
	set_thread(0);
	status0 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
	CU_ASSERT(rc == 0);

	set_thread(1);
	status1 = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
	CU_ASSERT(rc == 0);

	/*
	 * Poll the QoS thread to send the allowed I/O down
	 */
	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 1);
	CU_ASSERT(bdev_io_tailq_cnt(&qos_bdev_ch->qos_io) == 1);
	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * Advance the time and poll the QoS thread to run the periodic poller
	 */
	increment_time(1000);
	poll_threads();
	CU_ASSERT(module_ch->io_outstanding == 2);
	CU_ASSERT(bdev_io_tailq_cnt(&qos_bdev_ch->qos_io) == 0);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);

	/*
	 * The I/O are handled on thread 2, which acts as the master (QoS) thread
	 */
	set_thread(2);
	stub_complete_io(g_bdev.io_target, 0);
	spdk_put_io_channel(io_ch[0]);
	spdk_put_io_channel(io_ch[1]);
	spdk_put_io_channel(io_ch[2]);

	poll_threads();

	CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);

	teardown_test();
}

static void
enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	enum spdk_bdev_io_status *status = cb_arg;

	*status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	spdk_bdev_free_io(bdev_io);
}

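/*
 * Drive the channel into the NOMEM path and verify that queued I/O is only
 * retried once enough completions occur to cross nomem_threshold, and that a
 * reset drains everything, including the nomem_io queue.
 */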
static void
enomem(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
	uint32_t nomem_cnt, i;
	struct spdk_bdev_io *first_io;
	int rc;

	setup_test();

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	/* First submit a number of IOs equal to what the channel can support. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Next, submit one additional I/O.  This one should fail with ENOMEM and then go onto
	 *  the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));
	first_io = TAILQ_FIRST(&module_ch->nomem_io);

	/*
	 * Now submit a bunch more I/O.  These should all fail with ENOMEM and get queued behind
	 *  the first_io above.
	 */
	for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}

	/* Assert that first_io is still at the head of the list. */
	CU_ASSERT(TAILQ_FIRST(&module_ch->nomem_io) == first_io);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);
	CU_ASSERT(module_ch->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));

	/*
	 * Complete 1 I/O only.  The key check here is bdev_io_tailq_cnt - this should not have
	 *  changed since completing just 1 I/O should not trigger retrying the queued nomem_io
	 *  list.
	 */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Complete enough I/O to hit the nomem_threshold.  This should trigger retrying nomem_io,
	 *  and we should see I/O get resubmitted to the test bdev module.
	 */
	stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) < nomem_cnt);
	nomem_cnt = bdev_io_tailq_cnt(&module_ch->nomem_io);

	/* Complete 1 I/O only.  This should not trigger retrying the queued nomem_io. */
	stub_complete_io(g_bdev.io_target, 1);
	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == nomem_cnt);

	/*
	 * Send a reset and confirm that all I/O are completed, including the ones that
	 *  were queued on the nomem_io list.
	 */
	status_reset = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
	poll_threads();
	CU_ASSERT(rc == 0);
	/* This will complete the reset. */
	stub_complete_io(g_bdev.io_target, 0);

	CU_ASSERT(bdev_io_tailq_cnt(&module_ch->nomem_io) == 0);
	CU_ASSERT(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	poll_threads();
	teardown_test();
}

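/*
 * Two bdevs sharing one io_target share a module channel, so I/O queued as
 * NOMEM through one bdev must be retried when the other bdev's I/O completes.
 */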
static void
enomem_multi_bdev(void)
{
	struct spdk_io_channel *io_ch;
	struct spdk_bdev_channel *bdev_ch;
	struct spdk_bdev_module_channel *module_ch;
	struct ut_bdev_channel *ut_ch;
	const uint32_t IO_ARRAY_SIZE = 64;
	const uint32_t AVAIL = 20;
	enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
	uint32_t i;
	struct ut_bdev *second_bdev;
	struct spdk_bdev_desc *second_desc;
	struct spdk_bdev_channel *second_bdev_ch;
	struct spdk_io_channel *second_ch;
	int rc;

	setup_test();

	/* Register a second bdev with the same io_target. */
	second_bdev = calloc(1, sizeof(*second_bdev));
	SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
	register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
	spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);

	set_thread(0);
	io_ch = spdk_bdev_get_io_channel(g_desc);
	bdev_ch = spdk_io_channel_get_ctx(io_ch);
	module_ch = bdev_ch->module_ch;
	ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
	ut_ch->avail_cnt = AVAIL;

	second_ch = spdk_bdev_get_io_channel(second_desc);
	second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
	SPDK_CU_ASSERT_FATAL(module_ch == second_bdev_ch->module_ch);

	/* Saturate io_target through bdev A. */
	for (i = 0; i < AVAIL; i++) {
		status[i] = SPDK_BDEV_IO_STATUS_PENDING;
		rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
		CU_ASSERT(rc == 0);
	}
	CU_ASSERT(TAILQ_EMPTY(&module_ch->nomem_io));

	/*
	 * Now submit I/O through the second bdev.  This should fail with ENOMEM
	 * and then go onto the nomem_io list.
	 */
	status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
	rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&module_ch->nomem_io));

	/* Complete first bdev's I/O.  This should retry sending second bdev's nomem_io. */
	stub_complete_io(g_bdev.io_target, AVAIL);

	SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&module_ch->nomem_io));
	CU_ASSERT(module_ch->io_outstanding == 1);

	/* Now complete our retried I/O. */
	stub_complete_io(g_bdev.io_target, 1);
	SPDK_CU_ASSERT_FATAL(module_ch->io_outstanding == 0);

	spdk_put_io_channel(io_ch);
	spdk_put_io_channel(second_ch);
	spdk_bdev_close(second_desc);
	unregister_bdev(second_bdev);
	free(second_bdev);
	poll_threads();
	teardown_test();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("bdev", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "basic", basic) == NULL ||
		CU_add_test(suite, "basic_poller", basic_poller) == NULL ||
		CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
		CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
		CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
		CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
		CU_add_test(suite, "io_during_qos", io_during_qos) == NULL ||
		CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
		CU_add_test(suite, "enomem", enomem) == NULL ||
		CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}