/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "thread/thread_internal.h"

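/* The implementation is compiled into the test by including the .c files
 * below directly, so the cases here can inspect internals such as
 * g_io_devices, g_threads, and each thread's timed-poller tree.
 */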
#include "thread/thread.c"
#include "common/lib/ut_multithread.c"

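/* Return value for the stubbed scheduler callbacks below; tests set this to a
 * negative value to force spdk_thread_create() to fail.
 */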
static int g_sched_rc = 0;

static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}

static bool
_thread_op_supported(enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return true;
	default:
		return false;
	}
}

static int
_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return _thread_schedule(thread);
	default:
		return -ENOTSUP;
	}
}

static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0,
				 SPDK_DEFAULT_MSG_MEMPOOL_SIZE);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}

static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

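	/* Only a positive return value marks a poller as busy; this test just
	 * records that the callback ran.
	 */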
	return -1;
}

static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	MOCK_SET(spdk_get_ticks, 0);
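	/* With spdk_get_ticks mocked, time advances only when the test calls
	 * spdk_delay_us(), which makes the timed-poller checks below deterministic.
	 */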
	/* Register a poller with no wait time and test its execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}

struct poller_ctx {
	struct spdk_poller	*poller;
	bool			run;
};

static int
poller_run_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

/* Verify the same poller can be switched multiple times between
 * pause and resume while it runs.
 */
static int
poller_run_pause_resume_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;

	spdk_poller_pause(poller_ctx->poller);
	spdk_poller_resume(poller_ctx->poller);
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

static void
poller_msg_pause_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_pause(poller);
}

static void
poller_msg_resume_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_resume(poller);
}

static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Register a poller that switches between pause and resume itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that pauses itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that switches between pause and resume itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause,
				    &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}

static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After the message has run on each thread, the completion callback
	 *  (the same function in this test) runs one more time on the original thread.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}

static int
channel_create(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)++;
	return 0;
}

static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)--;
}

static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
	spdk_for_each_channel_continue(i, 0);
}

static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
}

static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in several different, sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}

struct unreg_ctx {
	bool	ch_done;
	bool	foreach_done;
};

static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}

static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!RB_EMPTY(&g_io_devices));
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create a thread with no name; the library generates one automatically */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}

static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}

static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}

static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch1) == &g_device1);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device2);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

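	/* g_device3 was never registered as an io_device, so no channel can be
	 * created for it.
	 */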
	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}

static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}

/**
 * This test checks that a sequence of get, put, get, put, without allowing
 * the deferred put operation to complete, does not release the memory for
 * the channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);

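	/* The put above only defers the destroy to a message; getting the channel
	 * again before polling should revive the existing context rather than
	 * allocate a second one.
	 */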
	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);

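	/* With a mocked tick rate of 1 Hz, the thread-exit timeout (a few seconds
	 * of real time) amounts to just a few ticks, so the forced-exit path at
	 * the end of this test can be triggered by bumping spdk_get_ticks.
	 */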
	allocate_threads(4);

	/* Test if all pending messages are reaped for the exiting thread, and the
	 * thread moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending a message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending a message to thread 0 will still be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test if a released I/O channel is reaped even after the thread moves to
	 * the exiting state.
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because the I/O channel
	 * release has not completed.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 is still able to get another reference to the I/O channel
	 * even after it moves to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel is released. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test if an unregistered poller is reaped for the exiting thread, and the
	 * thread moves to the exited state.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);

	poll_threads();

	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test if the exiting thread is exited forcefully after timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Cause timeout forcefully. */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}

static int
poller_run_idle(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 0;
}

static int
poller_run_busy(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 1;
}

static void
thread_update_stats_test(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test if idle_tsc is updated as expected. */
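	/* Each poll advances the mocked TSC: the 1000 ticks spent inside the idle
	 * poller are charged to idle_tsc, while the 100-tick delay before the poll
	 * only advances tsc_last.
	 */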
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test if busy_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}

struct ut_nested_ch {
	struct spdk_io_channel *child;
	struct spdk_poller *poller;
};

struct ut_nested_dev {
	struct ut_nested_dev *child;
};

static int
ut_null_poll(void *ctx)
{
	return -1;
}

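/* Creating a channel for a device recursively acquires a channel for its
 * child device, building the dev1 -> dev2 -> dev3 chain used below.
 */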
static int
ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct ut_nested_dev *_dev = io_device;
	struct ut_nested_dev *_child;

	_child = _dev->child;

	if (_child != NULL) {
		_ch->child = spdk_get_io_channel(_child);
		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
	} else {
		_ch->child = NULL;
	}

	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);

	return 0;
}

static void
ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct spdk_io_channel *child;

	child = _ch->child;
	if (child != NULL) {
		spdk_put_io_channel(child);
	}

	spdk_poller_unregister(&_ch->poller);
}

static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}

static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}

static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	dev1 = io_device_get(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = io_device_get(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = io_device_get(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single spdk_get_io_channel() call on dev1 also creates the channels
	 * for dev2 and dev3 in turn. Their pollers are registered at the same time.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single spdk_put_io_channel() call on dev1 also destroys the channels
	 * for dev2 and dev3 in turn. Their pollers are unregistered at the same time.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after releasing the outermost
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	*devcnt += 1;

	return 0;
}

static void
destroy_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt > 0);
	*devcnt -= 1;
}

static void
unregister_cb2(void *io_device)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt == 0);
}

static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads and each thread gets a channel from the same device. */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state; it should remain exiting until both
	 * channels and the device are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state; it should remain exiting until its
	 * channel is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	spdk_put_io_channel(ch2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}

static int
dummy_poller(void *arg)
{
	return SPDK_POLLER_IDLE;
}

static void
cache_closest_timed_poller(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *tmp;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	poller1 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1800);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poll_threads();

	/* When multiple timed pollers are inserted, the cache should
	 * have the closest timed poller.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller2);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller2);

	/* If we unregister a timed poller with spdk_poller_unregister()
	 * while it is waiting, it is marked as being unregistered and
	 * is actually unregistered when it expires.
	 *
	 * Hence if we unregister the closest timed poller while it is waiting,
	 * the cache is not updated to the next timed poller until it expires.
	 */
	tmp = poller2;

	spdk_poller_unregister(&poller2);
	CU_ASSERT(poller2 == NULL);

	spdk_delay_us(499);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == tmp);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	/* If we pause a timed poller with spdk_poller_pause() while it is waiting,
	 * it is marked as being paused and is actually paused when it expires.
	 *
	 * Hence if we pause the closest timed poller while it is waiting, the cache
	 * is not updated to the next timed poller until it expires.
	 */
	spdk_poller_pause(poller3);

	spdk_delay_us(299);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	/* After unregistering all timed pollers, the cache should
	 * be NULL.
	 */
	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller3);

	spdk_delay_us(200);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}

static void
multi_timed_pollers_have_same_expiration(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *poller4, *tmp;
	uint64_t start_ticks;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/*
	 * case 1: multiple timed pollers have the same next_run_tick.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poller4 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller4 != NULL);

	/* poller1 and poller2 have the same next_run_tick, but the cache has poller1
	 * because poller1 was registered earlier than poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1 and poller2 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick, but the cache
	 * has poller3 because poller3 has not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller3 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	/* poller1, poller2, and poller4 have the same next_run_tick, but the cache
	 * has poller4 because poller4 has not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller4);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller4 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick, but the cache
	 * has poller3 because poller3 was updated earlier than poller1 and poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 3000);

	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller2);
	spdk_poller_unregister(&poller3);
	spdk_poller_unregister(&poller4);

	spdk_delay_us(1500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 3000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	/*
	 * case 2: unregister timed pollers while multiple timed pollers are registered.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);

	/* after 250 usec, register poller2 and poller3. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 250);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 750);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* unregister poller2 which is not the closest. */
	tmp = poller2;
	spdk_poller_unregister(&poller2);

	/* after 250 usec, poller1 is expired. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller2 is not unregistered yet because it has not expired. */
	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(tmp->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 750);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_poller_unregister(&poller3);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);

	spdk_poller_unregister(&poller1);

	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}

static int
dummy_create_cb(void *io_device, void *ctx_buf)
{
	return 0;
}

static void
dummy_destroy_cb(void *io_device, void *ctx_buf)
{
}

/* We had a bug where the compare function for the io_device tree
 * did not work as expected because the subtraction-based comparison
 * overflowed when the difference between two keys was more than 32 bits.
 * This test case verifies the fix for the bug.
 */
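/* For example, a compare written as
 *   return (uintptr_t)d1->io_device - (uintptr_t)d2->io_device;
 * truncates the 64-bit difference to int, so the sign can be wrong whenever
 * the difference does not fit in 32 bits; comparing with < and > avoids that.
 */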
static void
io_device_lookup(void)
{
	struct io_device dev1, dev2, *dev;
	struct spdk_io_channel *ch;

	/* The compare function io_device_cmp() had an overflow bug.
	 * Verify the fix first.
	 */
	dev1.io_device = (void *)0x7FFFFFFF;
	dev2.io_device = NULL;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	/* Check that no 32-bit overflow occurs even for larger key differences. */
	dev1.io_device = (void *)0x80000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x100000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x8000000000000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register((void *)0x1, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x7FFFFFFF, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x80000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000000000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)UINT64_MAX, dummy_create_cb, dummy_destroy_cb, 0, NULL);

	/* RB_MIN and RB_NEXT should return devs in ascending order by addresses.
	 * RB_FOREACH uses RB_MIN and RB_NEXT internally.
	 */
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x1);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x7FFFFFFF);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x80000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000000000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)UINT64_MAX);

	/* Verify spdk_get_io_channel() creates io_channels associated with the
	 * correct io_devices.
	 */
	ch = spdk_get_io_channel((void *)0x1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x1);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x7FFFFFFF);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x7FFFFFFF);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x80000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x80000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000000000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000000000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)UINT64_MAX);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)UINT64_MAX);
	spdk_put_io_channel(ch);

	poll_threads();

	spdk_io_device_unregister((void *)0x1, NULL);
	spdk_io_device_unregister((void *)0x7FFFFFFF, NULL);
	spdk_io_device_unregister((void *)0x80000000, NULL);
	spdk_io_device_unregister((void *)0x100000000, NULL);
	spdk_io_device_unregister((void *)0x8000000000000000, NULL);
	spdk_io_device_unregister((void *)0x8000000100000000, NULL);
	spdk_io_device_unregister((void *)UINT64_MAX, NULL);

	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("io_channel", NULL, NULL);

	CU_ADD_TEST(suite, thread_alloc);
	CU_ADD_TEST(suite, thread_send_msg);
	CU_ADD_TEST(suite, thread_poller);
	CU_ADD_TEST(suite, poller_pause);
	CU_ADD_TEST(suite, thread_for_each);
	CU_ADD_TEST(suite, for_each_channel_remove);
	CU_ADD_TEST(suite, for_each_channel_unreg);
	CU_ADD_TEST(suite, thread_name);
	CU_ADD_TEST(suite, channel);
	CU_ADD_TEST(suite, channel_destroy_races);
	CU_ADD_TEST(suite, thread_exit_test);
	CU_ADD_TEST(suite, thread_update_stats_test);
	CU_ADD_TEST(suite, nested_channel);
	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);
	CU_ADD_TEST(suite, cache_closest_timed_poller);
	CU_ADD_TEST(suite, multi_timed_pollers_have_same_expiration);
	CU_ADD_TEST(suite, io_device_lookup);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}