xref: /spdk/test/unit/lib/thread/thread.c/thread_ut.c (revision 2f5c602574a98ede645991abe279a96e19c50196)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "thread/thread_internal.h"
39 
40 #include "thread/thread.c"
41 #include "common/lib/ut_multithread.c"
42 
/* Return code that _thread_schedule() reports back to the thread library;
 * tests flip this between 0 (scheduling succeeds) and -1 (scheduling fails).
 */
static int g_sched_rc = 0;

/* Scheduling callback passed to spdk_thread_lib_init(); simply returns the
 * canned result in g_sched_rc so tests can force either outcome.
 */
static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}
50 
51 static bool
52 _thread_op_supported(enum spdk_thread_op op)
53 {
54 	switch (op) {
55 	case SPDK_THREAD_OP_NEW:
56 		return true;
57 	default:
58 		return false;
59 	}
60 }
61 
62 static int
63 _thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
64 {
65 	switch (op) {
66 	case SPDK_THREAD_OP_NEW:
67 		return _thread_schedule(thread);
68 	default:
69 		return -ENOTSUP;
70 	}
71 }
72 
/* Verify thread creation/teardown under three library configurations:
 * no scheduler callback, a plain scheduler callback, and the extended
 * (op-based) initialization.  For the latter two, also verify that a
 * failing scheduler causes spdk_thread_create() to return NULL.
 */
static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	/* Poll until the exiting thread reaches the exited state. */
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}
132 
/* Message callback: set the bool that ctx points at, so the sender can
 * observe that the message was executed.
 */
static void
send_msg_cb(void *ctx)
{
	bool *flag = ctx;

	*flag = true;
}
140 
/* Verify that a message sent from thread 1 to thread 0 executes only when
 * the destination thread is polled, not the sender.
 */
static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}
174 
/* Poller callback: record that the poller ran and return -1 unconditionally. */
static int
poller_run_done(void *ctx)
{
	bool *ran = ctx;

	*ran = true;
	return -1;
}
184 
/* Verify basic poller behavior: a zero-period poller runs on every poll,
 * while a 1000us timed poller runs only after the mocked clock advances by
 * its full period.
 */
static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	/* Pin the mocked tick counter so timed-poller arithmetic is deterministic. */
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}
230 
/* Context shared between a poller callback and the test body. */
struct poller_ctx {
	struct spdk_poller	*poller;	/* poller under test */
	bool			run;		/* set by the callback when it executes */
};
235 
236 static int
237 poller_run_pause(void *ctx)
238 {
239 	struct poller_ctx *poller_ctx = ctx;
240 
241 	poller_ctx->run = true;
242 	spdk_poller_pause(poller_ctx->poller);
243 
244 	return 0;
245 }
246 
247 /* Verify the same poller can be switched multiple times between
248  * pause and resume while it runs.
249  */
250 static int
251 poller_run_pause_resume_pause(void *ctx)
252 {
253 	struct poller_ctx *poller_ctx = ctx;
254 
255 	poller_ctx->run = true;
256 
257 	spdk_poller_pause(poller_ctx->poller);
258 	spdk_poller_resume(poller_ctx->poller);
259 	spdk_poller_pause(poller_ctx->poller);
260 
261 	return 0;
262 }
263 
/* Message callback that pauses the poller passed as ctx. */
static void
poller_msg_pause_cb(void *ctx)
{
	spdk_poller_pause((struct spdk_poller *)ctx);
}
271 
/* Message callback that resumes the poller passed as ctx. */
static void
poller_msg_resume_cb(void *ctx)
{
	spdk_poller_resume((struct spdk_poller *)ctx);
}
279 
/* Exhaustive pause/resume test matrix: self-pausing pollers, redundant
 * pause/resume calls, pause/resume issued via spdk_thread_send_msg, and the
 * same scenarios repeated for both immediate (0us) and timed (1000us)
 * pollers.  Every assertion depends on the exact poll/delay ordering above
 * it, so the sequence must not be reordered.
 */
static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* The poller paused itself on the first run, so it must not run again. */
	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Register a poller that switches between pause and resume itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* The last transition in the callback was a pause, so no second run. */
	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			/* A timed poller needs its period to elapse after the resume. */
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that pauses itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that switches between pause and resume itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause,
				    &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}
474 
/* Per-thread message callback: increment the shared counter that ctx
 * points at.
 */
static void
for_each_cb(void *ctx)
{
	int *counter = ctx;

	*counter += 1;
}
482 
/* Verify spdk_for_each_thread() delivers the message to each of the three
 * threads in turn as they are polled, and then invokes the completion
 * callback back on the originating thread (thread 0).
 */
static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	/* Same callback is used for both per-thread message and completion. */
	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After each thread is called, the completion calls it
	 * one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}
512 
/* Channel create callback; the io_device pointer doubles as a live-channel
 * counter, incremented on every channel creation.
 */
static int
channel_create(void *io_device, void *ctx_buf)
{
	int *live_channels = io_device;

	*live_channels += 1;
	return 0;
}
521 
/* Channel destroy callback; decrements the live-channel counter kept in
 * the io_device pointer.
 */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *live_channels = io_device;

	*live_channels -= 1;
}
529 
/* Per-channel iterator callback: count the visit and keep iterating. */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *visits = spdk_io_channel_iter_get_ctx(i);

	*visits += 1;
	spdk_for_each_channel_continue(i, 0);
}
538 
/* Iteration-complete callback: counted in the same counter as the
 * per-channel visits.
 */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *visits = spdk_io_channel_iter_get_ctx(i);

	*visits += 1;
}
546 
/* Verify spdk_for_each_channel() behaves correctly when a channel is
 * removed around the iteration: put before starting the iteration
 * (case #1) and put after starting but before the owning thread is polled
 * (case #2).
 */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	/* ch_count is both the io_device identifier and the live-channel counter. */
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	/* The put is deferred, so the channel is still alive until polled. */
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	/* 2 channels visited + 1 completion callback. */
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	/* 3 channels visited (ch0 still existed when iteration started) + completion. */
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}
612 
/* Tracks progress of the for-each-channel iteration in for_each_channel_unreg(). */
struct unreg_ctx {
	bool	ch_done;	/* per-channel callback executed */
	bool	foreach_done;	/* completion callback executed */
};
617 
618 static void
619 unreg_ch_done(struct spdk_io_channel_iter *i)
620 {
621 	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
622 
623 	ctx->ch_done = true;
624 
625 	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
626 	spdk_for_each_channel_continue(i, 0);
627 }
628 
629 static void
630 unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
631 {
632 	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
633 
634 	ctx->foreach_done = true;
635 }
636 
/* Verify io_device unregistration is deferred while a for-each-channel
 * iteration is outstanding, and that re-registering the same io_device
 * address during that window does not create a duplicate entry.  Inspects
 * the library-internal g_io_devices red-black tree directly.
 */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!RB_EMPTY(&g_io_devices));
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	/* Exactly one device registered. */
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}
687 
/* Verify thread naming: an unnamed thread gets an auto-generated name, and
 * an explicitly named thread reports exactly that name.
 */
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}
725 
/* Distinct addresses used purely as io_device identifiers. */
static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

/* Expected per-channel context values for g_device1 / g_device2. */
static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

/* Counters bumped by the create/destroy callbacks below. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;
735 
736 static int
737 create_cb_1(void *io_device, void *ctx_buf)
738 {
739 	CU_ASSERT(io_device == &g_device1);
740 	*(uint64_t *)ctx_buf = g_ctx1;
741 	g_create_cb_calls++;
742 	return 0;
743 }
744 
745 static void
746 destroy_cb_1(void *io_device, void *ctx_buf)
747 {
748 	CU_ASSERT(io_device == &g_device1);
749 	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
750 	g_destroy_cb_calls++;
751 }
752 
753 static int
754 create_cb_2(void *io_device, void *ctx_buf)
755 {
756 	CU_ASSERT(io_device == &g_device2);
757 	*(uint64_t *)ctx_buf = g_ctx2;
758 	g_create_cb_calls++;
759 	return 0;
760 }
761 
762 static void
763 destroy_cb_2(void *io_device, void *ctx_buf)
764 {
765 	CU_ASSERT(io_device == &g_device2);
766 	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
767 	g_destroy_cb_calls++;
768 }
769 
/* Verify I/O channel reference counting on a single thread: a second get on
 * the same device returns the same channel without re-creating it, the
 * destroy callback fires only when the last reference is put, and getting a
 * channel for an unregistered device returns NULL.
 */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch1) == &g_device1);

	/* Second get on the same device: no new create, same channel back. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device1);

	/* Putting one of two references must not destroy the channel. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device2);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	/* Last reference: destroy callback fires once the thread is polled. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* g_device3 was never registered, so no channel can be created. */
	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
831 
/* Create callback treating the channel context as a refcount: 0 -> 1. */
static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;
	return 0;
}
842 
/* Destroy callback treating the channel context as a refcount: 1 -> 0. */
static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}
851 
/**
 * This test is checking that a sequence of get, put, get, put without allowing
 * the deferred put operation to complete doesn't result in releasing the memory
 * for the channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	/* First put is deferred — intentionally NOT polled before the next get. */
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
886 
/* Verify the thread exit state machine: pending messages are reaped for an
 * exiting thread (thread 0), I/O channel release keeps a thread in the
 * exiting state until it completes (thread 1), pollers registered before
 * exit block the transition until unregistered (thread 2), and an exiting
 * thread is forced to the exited state after the timeout elapses on the
 * mocked clock (thread 3).
 */
static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

	/* With ticks_hz == 1, tick values below map directly onto seconds. */
	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);

	allocate_threads(4);

	/* Test if all pending messages are reaped for the exiting thread, and the
	 * thread moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending message to thread 0 will be still accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test releasing I/O channel is reaped even after the thread moves to
	 * the exiting state
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because I/O channel release
	 * does not complete yet.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 will be able to get the another reference of I/O channel
	 * even after the thread moves to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel
	 * is released.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test if unregistering poller is reaped for the exiting thread, and the
	 * thread moves to the exited state.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);

	poll_threads();

	/* poller1 was unregistered before it ever ran; poller2 still runs. */
	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test if the exiting thread is exited forcefully after timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	/* Still within the exit timeout — the live poller keeps it exiting. */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Cause timeout forcefully. */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}
1042 
/* Poller stub that advances the mocked clock by `ctx` microseconds and
 * reports idle (0).
 */
static int
poller_run_idle(void *ctx)
{
	spdk_delay_us((uint64_t)ctx);

	return 0;
}
1052 
/* Poller stub that advances the mocked clock by `ctx` microseconds and
 * reports busy (1).
 */
static int
poller_run_busy(void *ctx)
{
	spdk_delay_us((uint64_t)ctx);

	return 1;
}
1062 
/* Verify idle_tsc/busy_tsc accounting against a fully mocked tick counter:
 * a poller returning idle accrues its runtime to idle_tsc, a poller
 * returning busy accrues to busy_tsc, and tsc_last tracks the mocked clock.
 * The asserted values depend on each poller burning a fixed amount of
 * mocked time per call (1000 idle / 100000 busy) plus the explicit delays.
 */
static void
thread_update_stats_test(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test if idle_tsc is updated expectedly. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test if busy_tsc is updated expectedly. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}
1128 
/* Per-channel context for the nested-channel tests: each channel may hold a
 * reference to a channel on the child device plus its own poller.
 */
struct ut_nested_ch {
	struct spdk_io_channel *child;	/* channel on the child device, or NULL */
	struct spdk_poller *poller;	/* poller owned by this channel */
};

/* io_device that optionally chains to a child device. */
struct ut_nested_dev {
	struct ut_nested_dev *child;
};
1137 
/* No-op poller callback: ignores its context and always returns -1. */
static int
ut_null_poll(void *ctx)
{
	(void)ctx;

	return -1;
}
1143 
1144 static int
1145 ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
1146 {
1147 	struct ut_nested_ch *_ch = ctx_buf;
1148 	struct ut_nested_dev *_dev = io_device;
1149 	struct ut_nested_dev *_child;
1150 
1151 	_child = _dev->child;
1152 
1153 	if (_child != NULL) {
1154 		_ch->child = spdk_get_io_channel(_child);
1155 		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
1156 	} else {
1157 		_ch->child = NULL;
1158 	}
1159 
1160 	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
1161 	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);
1162 
1163 	return 0;
1164 }
1165 
1166 static void
1167 ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
1168 {
1169 	struct ut_nested_ch *_ch = ctx_buf;
1170 	struct spdk_io_channel *child;
1171 
1172 	child = _ch->child;
1173 	if (child != NULL) {
1174 		spdk_put_io_channel(child);
1175 	}
1176 
1177 	spdk_poller_unregister(&_ch->poller);
1178 }
1179 
/* Assert the state expected right after a nested channel is created:
 * one reference on the channel and one on its device.
 */
static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}
1187 
/* Assert the state while channel destruction is pending: the user
 * reference is gone but the deferred destroy is still holding the
 * channel and device.
 */
static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}
1195 
/* Assert that channel destruction has completed and released the device. */
static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}
1201 
/* Assert that a poller was successfully registered. */
static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}
1207 
/* Verify that I/O channels may be acquired and released recursively:
 * a channel's create callback calls spdk_get_io_channel() for a child
 * device and its destroy callback calls spdk_put_io_channel() on it.
 * Also verify that an exiting thread does not complete exit until every
 * level of the nested channels has been destroyed.
 */
static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/* Build a three-level device chain: dev1 -> dev2 -> dev3. */
	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	dev1 = io_device_get(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = io_device_get(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = io_device_get(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single call spdk_get_io_channel() to dev1 will also create channels
	 * to dev2 and dev3 continuously. Pollers will be registered together.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single call spdk_put_io_channel() to dev1 will also destroy channels
	 * to dev2 and dev3 continuously. Pollers will be unregistered together.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after unregistering the non-nested
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	/* Each poll destroys exactly one nesting level; the thread must stay
	 * in the exiting state until the innermost channel is gone.
	 */
	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
1309 
/* Channel create callback that counts live channels in the io_device
 * key itself (a uint64_t owned by the test).
 */
static int
create_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *channel_count = io_device;

	(*channel_count)++;

	return 0;
}
1319 
/* Channel destroy callback paired with create_cb2(): decrements the
 * live-channel count and checks it never underflows.
 */
static void
destroy_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *channel_count = io_device;

	CU_ASSERT(*channel_count > 0);
	(*channel_count)--;
}
1328 
/* Device unregister callback: by the time it runs, every channel must
 * have been destroyed, so the count kept by create_cb2()/destroy_cb2()
 * must be back to zero.
 */
static void
unregister_cb2(void *io_device)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt == 0);
}
1336 
/* Verify the race between spdk_io_device_unregister() and thread exit:
 * the unregister callback must not run until all channels are released,
 * and each exiting thread must remain in the exiting state until the
 * channels it owns are actually destroyed.
 */
static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads and each thread gets a channel from the same device. */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state, but it should keep exiting until two channels
	 * and a device are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state, but it should keep exiting until its channel
	 * is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	spdk_put_io_channel(ch2);
	poll_thread(1);

	/* Thread 1 can finish once its own channel is gone, but thread 0 is
	 * still waiting for the device unregistration to complete.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}
1401 
/* Timed poller body that does nothing and reports no work done. */
static int
dummy_poller(void *arg)
{
	return SPDK_POLLER_IDLE;
}
1407 
/* Verify that thread->first_timed_poller always caches the timed poller
 * with the earliest expiration, including while pollers are being
 * unregistered or paused.
 */
static void
cache_closest_timed_poller(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *tmp;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	poller1 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1800);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poll_threads();

	/* When multiple timed pollers are inserted, the cache should
	 * have the closest timed poller.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller2);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller2);

	/* If we unregister a timed poller by spdk_poller_unregister()
	 * when it is waiting, it is marked as being unregistered and
	 * is actually unregistered when it is expired.
	 *
	 * Hence if we unregister the closest timed poller when it is waiting,
	 * the cache is not updated to the next timed poller until it is expired.
	 */
	tmp = poller2;

	spdk_poller_unregister(&poller2);
	CU_ASSERT(poller2 == NULL);

	spdk_delay_us(499);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == tmp);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	/* If we pause a timed poller by spdk_poller_pause() when it is waiting,
	 * it is marked as being paused and is actually paused when it is expired.
	 *
	 * Hence if we pause the closest timed poller when it is waiting, the cache
	 * is not updated to the next timed poller until it is expired.
	 */
	spdk_poller_pause(poller3);

	spdk_delay_us(299);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	/* After unregistering all timed pollers, the cache should
	 * be NULL.
	 */
	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller3);

	spdk_delay_us(200);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}
1501 
/* Verify the cached closest timed poller and the timed poller tree when
 * multiple timed pollers share the same next_run_tick, both while they
 * keep running (case 1) and while some are unregistered mid-flight
 * (case 2).
 */
static void
multi_timed_pollers_have_same_expiration(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *poller4, *tmp;
	uint64_t start_ticks;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/*
	 * case 1: multiple timed pollers have the same next_run_tick.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poller4 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller4 != NULL);

	/* poller1 and poller2 have the same next_run_tick but cache has poller1
	 * because poller1 is registered earlier than poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1 and poller2 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller3 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	/* poller1, poller2, and poller4 have the same next_run_tick but cache
	 * has poller4 because poller4 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller4);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller4 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is updated earlier than poller1 and poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 3000);

	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller2);
	spdk_poller_unregister(&poller3);
	spdk_poller_unregister(&poller4);

	spdk_delay_us(1500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 3000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	/*
	 * case 2: unregister timed pollers while multiple timed pollers are registered.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);

	/* after 250 usec, register poller2 and poller3. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 250);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 750);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* unregister poller2 which is not the closest. */
	tmp = poller2;
	spdk_poller_unregister(&poller2);

	/* after 250 usec, poller1 is expired. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller2 is not unregistered yet because it is not expired. */
	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(tmp->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 750);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_poller_unregister(&poller3);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);

	spdk_poller_unregister(&poller1);

	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}
1664 
/* I/O channel create callback that performs no per-channel setup. */
static int
dummy_create_cb(void *io_device, void *ctx_buf)
{
	(void)io_device;
	(void)ctx_buf;

	return 0;
}
1670 
/* I/O channel destroy callback that performs no per-channel teardown. */
static void
dummy_destroy_cb(void *io_device, void *ctx_buf)
{
	(void)io_device;
	(void)ctx_buf;
}
1675 
/* We had a bug that the compare function for the io_device tree
 * did not work as expected because subtraction caused overflow
 * when the difference between two keys was more than 32 bits.
 * This test case verifies the fix for the bug.
 */
static void
io_device_lookup(void)
{
	struct io_device dev1, dev2, *dev;
	struct spdk_io_channel *ch;

	/* The compare function io_device_cmp() had an overflow bug.
	 * Verify the fix first.
	 */
	dev1.io_device = (void *)0x7FFFFFFF;
	dev2.io_device = NULL;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	/* Check if overflow due to 32 bits does not occur. */
	dev1.io_device = (void *)0x80000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x100000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x8000000000000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	allocate_threads(1);
	set_thread(0);

	/* Register devices whose addresses straddle the 31-, 32-, and 63-bit
	 * boundaries to exercise the comparison across wide key differences.
	 */
	spdk_io_device_register((void *)0x1, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x7FFFFFFF, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x80000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000000000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)UINT64_MAX, dummy_create_cb, dummy_destroy_cb, 0, NULL);

	/* RB_MIN and RB_NEXT should return devs in ascending order by addresses.
	 * RB_FOREACH uses RB_MIN and RB_NEXT internally.
	 */
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x1);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x7FFFFFFF);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x80000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000000000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)UINT64_MAX);

	/* Verify spdk_get_io_channel() creates io_channels associated with the
	 * correct io_devices.
	 */
	ch = spdk_get_io_channel((void *)0x1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x1);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x7FFFFFFF);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x7FFFFFFF);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x80000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x80000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000000000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000000000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)UINT64_MAX);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)UINT64_MAX);
	spdk_put_io_channel(ch);

	poll_threads();

	spdk_io_device_unregister((void *)0x1, NULL);
	spdk_io_device_unregister((void *)0x7FFFFFFF, NULL);
	spdk_io_device_unregister((void *)0x80000000, NULL);
	spdk_io_device_unregister((void *)0x100000000, NULL);
	spdk_io_device_unregister((void *)0x8000000000000000, NULL);
	spdk_io_device_unregister((void *)0x8000000100000000, NULL);
	spdk_io_device_unregister((void *)UINT64_MAX, NULL);

	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
}
1804 
/* Test runner: registers every test in the io_channel suite and returns
 * the number of failures as the process exit status.
 */
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	/* Abort on internal CUnit framework errors so they are not silently ignored. */
	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("io_channel", NULL, NULL);

	CU_ADD_TEST(suite, thread_alloc);
	CU_ADD_TEST(suite, thread_send_msg);
	CU_ADD_TEST(suite, thread_poller);
	CU_ADD_TEST(suite, poller_pause);
	CU_ADD_TEST(suite, thread_for_each);
	CU_ADD_TEST(suite, for_each_channel_remove);
	CU_ADD_TEST(suite, for_each_channel_unreg);
	CU_ADD_TEST(suite, thread_name);
	CU_ADD_TEST(suite, channel);
	CU_ADD_TEST(suite, channel_destroy_races);
	CU_ADD_TEST(suite, thread_exit_test);
	CU_ADD_TEST(suite, thread_update_stats_test);
	CU_ADD_TEST(suite, nested_channel);
	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);
	CU_ADD_TEST(suite, cache_closest_timed_poller);
	CU_ADD_TEST(suite, multi_timed_pollers_have_same_expiration);
	CU_ADD_TEST(suite, io_device_lookup);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
1840