xref: /spdk/test/unit/lib/thread/thread.c/thread_ut.c (revision 510f4c134a21b45ff3a5add9ebc6c6cf7e49aeab)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk_cunit.h"
9 
10 #include "thread/thread_internal.h"
11 
12 #include "thread/thread.c"
13 #include "common/lib/ut_multithread.c"
14 
/* Result the scheduling callbacks below hand back to the thread library;
 * tests stage 0 (success) or a negative value (failure) here. */
static int g_sched_rc = 0;
16 
/* Minimal scheduling callback for spdk_thread_lib_init(): report whatever
 * result the test staged in g_sched_rc. */
static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}
22 
23 static bool
24 _thread_op_supported(enum spdk_thread_op op)
25 {
26 	switch (op) {
27 	case SPDK_THREAD_OP_NEW:
28 		return true;
29 	default:
30 		return false;
31 	}
32 }
33 
34 static int
35 _thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
36 {
37 	switch (op) {
38 	case SPDK_THREAD_OP_NEW:
39 		return _thread_schedule(thread);
40 	default:
41 		return -ENOTSUP;
42 	}
43 }
44 
/* Exercise spdk_thread creation/teardown under three library configurations:
 * no scheduler callback, a plain scheduler callback, and the extended
 * (thread-op) initialization.  In each mode, verify creation succeeds when
 * scheduling succeeds and fails (returns NULL) when scheduling fails. */
static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	/* Poll until the exiting thread drains and reaches the exited state. */
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0,
				 SPDK_DEFAULT_MSG_MEMPOOL_SIZE);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}
105 
/* Message callback: flag the bool pointed to by ctx so the caller can tell
 * the message actually executed. */
static void
send_msg_cb(void *ctx)
{
	*(bool *)ctx = true;
}
113 
/* Verify that a message sent to another thread is executed only when the
 * destination thread is polled, not when the sender is polled. */
static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}
147 
/* Poller callback: record that it ran via the bool ctx and return -1. */
static int
poller_run_done(void *ctx)
{
	*(bool *)ctx = true;

	return -1;
}
157 
/* Verify basic poller semantics: a 0-period poller runs on every poll, and a
 * timed (1000us) poller runs exactly once per elapsed period. */
static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	/* Pin the mock clock so timed-poller expirations are deterministic. */
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	/* Period has not elapsed yet, so the poller must not run. */
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	/* Without another delay the timed poller must not fire again. */
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}
203 
/* Context shared between a poller callback and its test body. */
struct poller_ctx {
	struct spdk_poller	*poller;	/* poller the callback pauses/resumes */
	bool			run;		/* set each time the callback executes */
};
208 
209 static int
210 poller_run_pause(void *ctx)
211 {
212 	struct poller_ctx *poller_ctx = ctx;
213 
214 	poller_ctx->run = true;
215 	spdk_poller_pause(poller_ctx->poller);
216 
217 	return 0;
218 }
219 
220 /* Verify the same poller can be switched multiple times between
221  * pause and resume while it runs.
222  */
223 static int
224 poller_run_pause_resume_pause(void *ctx)
225 {
226 	struct poller_ctx *poller_ctx = ctx;
227 
228 	poller_ctx->run = true;
229 
230 	spdk_poller_pause(poller_ctx->poller);
231 	spdk_poller_resume(poller_ctx->poller);
232 	spdk_poller_pause(poller_ctx->poller);
233 
234 	return 0;
235 }
236 
/* Message callback: pause the poller passed as the message context. */
static void
poller_msg_pause_cb(void *ctx)
{
	spdk_poller_pause((struct spdk_poller *)ctx);
}
244 
/* Message callback: resume the poller passed as the message context. */
static void
poller_msg_resume_cb(void *ctx)
{
	spdk_poller_resume((struct spdk_poller *)ctx);
}
252 
/* Exercise spdk_poller_pause()/spdk_poller_resume() from every relevant
 * context: from within the poller's own callback, from the test body, from
 * a sent message, and for both immediate (period 0) and timed pollers.
 * Also checks double-pause, resume-of-unpaused, resume-right-after-pause,
 * and unregistering a paused poller. */
static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Second poll: the poller paused itself, so it must not run again. */
	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Register a poller that switches between pause and resume itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* The last state set inside the callback was "paused". */
	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			/* A timed poller needs its period to elapse again after resume. */
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that pauses itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that switches between pause and resume itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause,
				    &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}
447 
/* spdk_for_each_thread() callback: bump the shared counter. */
static void
for_each_cb(void *ctx)
{
	*(int *)ctx += 1;
}
455 
/* Verify spdk_for_each_thread() visits every thread exactly once and then
 * runs the completion callback on the originating thread. */
static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	/* Both the per-thread callback and the completion increment count. */
	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After each thread is called, the completion calls it
	 * one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}
485 
/* Channel create callback: the io_device key doubles as a live-channel
 * counter, so creating a channel increments it. */
static int
channel_create(void *io_device, void *ctx_buf)
{
	*(int *)io_device += 1;

	return 0;
}
494 
/* Channel destroy callback: decrement the live-channel counter stored at
 * the io_device key. */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
	*(int *)io_device -= 1;
}
502 
/* Per-channel callback for spdk_for_each_channel(): count the visit and
 * advance the iterator to the next channel. */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *count = spdk_io_channel_iter_get_ctx(i);

	*count += 1;
	spdk_for_each_channel_continue(i, 0);
}
511 
/* Completion callback for spdk_for_each_channel(): counts as one more
 * visit, so a full iteration over N channels yields N + 1 increments. */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *count = spdk_io_channel_iter_get_ctx(i);

	*count += 1;
}
519 
/* Verify spdk_for_each_channel() behaves correctly when a channel is
 * released before or during the iteration. */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	/* The put is deferred; the channel stays alive until thread 0 is polled. */
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	/* Only 2 channels remain: 2 channel_msg calls + 1 channel_cpl = 3. */
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	/* All 3 channels are visited (the put was deferred): 3 + 1 = 4. */
	poll_threads();
	CU_ASSERT(ch_count == 2);
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}
585 
/* Tracks which callbacks fired during the unregister-while-iterating test. */
struct unreg_ctx {
	bool	ch_done;	/* per-channel callback executed */
	bool	foreach_done;	/* foreach completion callback executed */
};
590 
/* Per-channel callback: record the visit and continue the iteration.  Peeks
 * at the iterator's internal cur_thread to confirm the iteration is still in
 * progress despite the pending device unregister. */
static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}
601 
602 static void
603 unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
604 {
605 	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
606 
607 	ctx->foreach_done = true;
608 }
609 
/* Verify that double-registering an io_device is a no-op and that
 * spdk_io_device_unregister() is deferred while a foreach is outstanding. */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!RB_EMPTY(&g_io_devices));
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	/* Exactly one device should be registered. */
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);
	ch0 = spdk_get_io_channel(&io_target);

	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);

	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);

	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);
	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have immediately removed the device.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);

	/*
	 * There are no more foreach operations outstanding, so the device should be
	 * unregistered.
	 */
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}
662 
/* Verify thread naming: an auto-generated name when none is supplied, and an
 * exact match when an explicit name is given. */
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	/* Re-fetch via spdk_get_thread() to confirm set_thread took effect. */
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}
700 
/* io_device registration keys; only their addresses matter, not their values. */
static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

/* Payloads the create callbacks write into each channel's context buffer. */
static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

/* Invocation counters checked by the tests after each get/put. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;
710 
/* Create callback for g_device1: stamp g_ctx1 into the channel context and
 * count the invocation. */
static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}
719 
/* Destroy callback for g_device1: verify the context is untouched and count
 * the invocation. */
static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}
727 
/* Create callback for g_device2: stamp g_ctx2 into the channel context and
 * count the invocation. */
static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}
736 
/* Destroy callback for g_device2: verify the context is untouched and count
 * the invocation. */
static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}
744 
/* Verify I/O channel reference counting: repeated gets on the same device
 * return the same channel without re-creating it, the destroy callback fires
 * only when the last reference is put, and getting a channel for an
 * unregistered device fails. */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch1) == &g_device1);

	/* Second get on the same device returns the cached channel. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device1);

	/* Putting one of two references must not destroy the channel. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device2);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* g_device3 was never registered, so the get must fail. */
	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
806 
/* Create callback: the channel context acts as a refcount; it must be 0 on
 * entry (fresh buffer) and is set to 1. */
static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}
817 
/* Destroy callback: the refcount must still be 1 (no double destroy) and is
 * cleared back to 0. */
static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}
826 
/**
 * This test is checking that a sequence of get, put, get, put without allowing
 * the deferred put operation to complete doesn't result in releasing the memory
 * for the channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	/* The put is deferred; no poll happens before the next get. */
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
861 
/* Verify the thread exit state machine: pending messages are drained, open
 * I/O channels and registered pollers keep an exiting thread alive until
 * released, and an exiting thread is forced to the exited state once the
 * exit timeout elapses. */
static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

	/* Fix the mock clock so the exit timeout can be triggered explicitly. */
	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);

	allocate_threads(4);

	/* Test if all pending messages are reaped for the exiting thread, and the
	 * thread moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending message to thread 0 will be still accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test releasing I/O channel is reaped even after the thread moves to
	 * the exiting state
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because I/O channel release
	 * does not complete yet.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 will be able to get the another reference of I/O channel
	 * even after the thread moves to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel is
	 * released.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test if unregistering poller is reaped for the exiting thread, and the
	 * thread moves to the exited state.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);

	poll_threads();

	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test if the exiting thread is exited forcefully after timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Cause timeout forcefully. */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}
1017 
/* Poller callback: consume ctx microseconds of simulated time and report 0
 * (the idle return code). */
static int
poller_run_idle(void *ctx)
{
	spdk_delay_us((uint64_t)ctx);

	return 0;
}
1027 
/* Poller callback: consume ctx microseconds of simulated time and report 1
 * (the busy return code). */
static int
poller_run_busy(void *ctx)
{
	spdk_delay_us((uint64_t)ctx);

	return 1;
}
1037 
/* Verify thread stats bookkeeping: ticks consumed by a poller returning idle
 * are accounted to idle_tsc, ticks consumed by a poller returning busy are
 * accounted to busy_tsc, and tsc_last tracks the mock clock. */
static void
thread_update_stats_test(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test if idle_tsc is updated expectedly. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	/* 10 (start) + 100 (delay) + 1000 (poller's simulated work) = 1110;
	 * only the poller's 1000 ticks count as idle time. */
	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test if busy_tsc is updated expectedly. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	/* 2210 + 10000 (delay) + 100000 (poller's simulated work) = 112210;
	 * the poller's 100000 ticks count as busy time. */
	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}
1103 
/* Channel context for the nested-channel test: each channel owns a channel
 * to its device's child (if any) plus a poller. */
struct ut_nested_ch {
	struct spdk_io_channel *child;	/* channel to the child device, or NULL */
	struct spdk_poller *poller;	/* per-channel poller */
};
1108 
/* Test io_device that links to a child device, forming a chain. */
struct ut_nested_dev {
	struct ut_nested_dev *child;	/* next device in the chain, or NULL */
};
1112 
/* Do-nothing poller callback that always returns -1; exists only so each
 * nested channel has a poller registered. */
static int
ut_null_poll(void *ctx)
{
	return -1;
}
1118 
/* Create callback for the nested devices: recursively acquires a channel to
 * the device's child (creating the whole chain from a single top-level
 * spdk_get_io_channel() call) and registers a poller for this channel. */
static int
ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct ut_nested_dev *_dev = io_device;
	struct ut_nested_dev *_child;

	_child = _dev->child;

	if (_child != NULL) {
		/* Nested get: triggers this same callback for the child device. */
		_ch->child = spdk_get_io_channel(_child);
		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
	} else {
		_ch->child = NULL;
	}

	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);

	return 0;
}
1140 
1141 static void
1142 ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
1143 {
1144 	struct ut_nested_ch *_ch = ctx_buf;
1145 	struct spdk_io_channel *child;
1146 
1147 	child = _ch->child;
1148 	if (child != NULL) {
1149 		spdk_put_io_channel(child);
1150 	}
1151 
1152 	spdk_poller_unregister(&_ch->poller);
1153 }
1154 
/* Assert the expected state right after a nested channel is created: one
 * channel reference, correct device backpointer, one device reference. */
static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}
1162 
/* Assert the expected state while a channel's deferred destroy is pending:
 * no live references, one destroy reference, device still referenced. */
static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}
1170 
/* Assert a device holds no references once its channel destroy completed. */
static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}
1176 
/* Assert that a poller registration succeeded (non-NULL handle). */
static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}
1182 
/* Verify that nested I/O channels (dev1 -> dev2 -> dev3, chained together
 * by ut_nested_ch_create_cb) are created and destroyed recursively, and
 * that thread exit completes only after all nested channels are released.
 */
static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/* Chain the devices so the create callback recursively gets a channel
	 * to the child device: dev1 -> dev2 -> dev3 -> NULL.
	 */
	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	/* Look up the internal io_device structs so refcounts can be checked. */
	dev1 = io_device_get(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = io_device_get(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = io_device_get(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single call spdk_get_io_channel() to dev1 will also create channels
	 * to dev2 and dev3 continuously. Pollers will be registered together.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single call spdk_put_io_channel() to dev1 will also destroy channels
	 * to dev2 and dev3 continuously. Pollers will be unregistered together.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after unregistering the non-nested
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	/* Each poll_thread_times(0, 1) below tears down exactly one level of
	 * the nesting: ch1 first, then ch2, then ch3. The thread cannot finish
	 * exiting until the last nested channel is gone.
	 */
	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	/* All channels are released, so the thread exit can now complete. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
1284 
/* Channel create callback that counts active channels in the io_device
 * itself, which for this test is simply a uint64_t counter.
 */
static int
create_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = io_device;

	(*devcnt)++;

	return 0;
}
1294 
/* Channel destroy callback paired with create_cb2(): decrements the active
 * channel counter stored in the io_device, which must still be positive.
 */
static void
destroy_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = io_device;

	CU_ASSERT(*devcnt > 0);
	(*devcnt)--;
}
1303 
/* Device unregister callback: by the time the device is unregistered, all
 * of its channels must have been destroyed, so the counter must be zero.
 */
static void
unregister_cb2(void *io_device)
{
	CU_ASSERT(*(uint64_t *)io_device == 0);
}
1311 
/* Verify the race between io_device unregistration and thread exit: each
 * thread may finish exiting only after its own channel is released, and the
 * thread that issued the unregistration also waits for the device itself.
 */
static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads and each thread gets a channel from the same device. */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state, but it should keep exiting until two channels
	 * and a device are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

	/* Thread 1 still holds ch2, so the device is not fully released yet
	 * and thread 0 cannot finish exiting.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state, but it should keep exiting until its channel
	 * is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	spdk_put_io_channel(ch2);
	poll_thread(1);

	/* Thread 1 released its only channel and can exit; thread 0 still
	 * needs one more poll to observe the device unregistration completing.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}
1376 
1377 static int
1378 dummy_poller(void *arg)
1379 {
1380 	return SPDK_POLLER_IDLE;
1381 }
1382 
/* Verify that thread->first_timed_poller always caches the timed poller
 * with the closest expiration, across poller expiration, unregistration,
 * and pause.
 */
static void
cache_closest_timed_poller(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *tmp;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/* Three timed pollers with distinct periods: 1000, 1500, and 1800 usec. */
	poller1 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1800);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poll_threads();

	/* When multiple timed pollers are inserted, the cache should
	 * have the closest timed poller.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	spdk_delay_us(1000);
	poll_threads();

	/* poller1 expired and was rescheduled; poller2 is now the closest. */
	CU_ASSERT(thread->first_timed_poller == poller2);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller2);

	/* If we unregister a timed poller by spdk_poller_unregister()
	 * when it is waiting, it is marked as being unregistered and
	 * is actually unregistered when it is expired.
	 *
	 * Hence if we unregister the closest timed poller when it is waiting,
	 * the cache is not updated to the next timed poller until it is expired.
	 */
	tmp = poller2;

	spdk_poller_unregister(&poller2);
	CU_ASSERT(poller2 == NULL);

	/* One tick before poller2's expiration it is still cached. */
	spdk_delay_us(499);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == tmp);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	/* If we pause a timed poller by spdk_poller_pause() when it is waiting,
	 * it is marked as being paused and is actually paused when it is expired.
	 *
	 * Hence if we pause the closest timed poller when it is waiting, the cache
	 * is not updated to the next timed poller until it is expired.
	 */
	spdk_poller_pause(poller3);

	spdk_delay_us(299);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	spdk_delay_us(1);
	poll_threads();

	/* poller3 is now paused, so poller1 becomes the closest again. */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	/* After unregistering all timed pollers, the cache should
	 * be NULL.
	 */
	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller3);

	spdk_delay_us(200);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}
1476 
/* Verify that the closest-timed-poller cache behaves correctly when
 * multiple timed pollers share the same next_run_tick, both when they
 * expire together and when some are unregistered while waiting.
 */
static void
multi_timed_pollers_have_same_expiration(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *poller4, *tmp;
	uint64_t start_ticks;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/*
	 * case 1: multiple timed pollers have the same next_run_tick.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poller4 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller4 != NULL);

	/* poller1 and poller2 have the same next_run_tick but cache has poller1
	 * because poller1 is registered earlier than poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1 and poller2 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller3 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	/* poller1, poller2, and poller4 have the same next_run_tick but cache
	 * has poller4 because poller4 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller4);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller4 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is updated earlier than poller1 and poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 3000);

	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller2);
	spdk_poller_unregister(&poller3);
	spdk_poller_unregister(&poller4);

	/* Delay long enough for all pending unregistrations to take effect. */
	spdk_delay_us(1500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 3000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	/*
	 * case 2: unregister timed pollers while multiple timed pollers are registered.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);

	/* after 250 usec, register poller2 and poller3. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 250);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 750);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* unregister poller2 which is not the closest. */
	tmp = poller2;
	spdk_poller_unregister(&poller2);

	/* after 250 usec, poller1 is expired. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller2 is not unregistered yet because it is not expired. */
	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(tmp->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* after another 250 usec, poller2 expires and is actually removed. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 750);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_poller_unregister(&poller3);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);

	spdk_poller_unregister(&poller1);

	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}
1639 
/* No-op channel create callback used by the io_device_lookup test. */
static int
dummy_create_cb(void *io_device, void *ctx_buf)
{
	(void)io_device;
	(void)ctx_buf;

	return 0;
}
1645 
/* No-op channel destroy callback used by the io_device_lookup test. */
static void
dummy_destroy_cb(void *io_device, void *ctx_buf)
{
	(void)io_device;
	(void)ctx_buf;
}
1650 
/* We had a bug where the compare function for the io_device tree did not
 * work as expected: its subtraction-based comparison overflowed when the
 * difference between two keys exceeded 32 bits.
 * This test case verifies the fix for that bug.
 */
static void
io_device_lookup(void)
{
	struct io_device dev1, dev2, *dev;
	struct spdk_io_channel *ch;

	/* The compare function io_device_cmp() had an overflow bug.
	 * Verify the fix first.
	 */
	dev1.io_device = (void *)0x7FFFFFFF;
	dev2.io_device = NULL;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	/* Check if overflow due to 32 bits does not occur. */
	dev1.io_device = (void *)0x80000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x100000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x8000000000000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	allocate_threads(1);
	set_thread(0);

	/* Register io_devices at addresses chosen so that pairwise differences
	 * exceed 32 bits, which previously broke the subtraction-based compare.
	 */
	spdk_io_device_register((void *)0x1, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x7FFFFFFF, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x80000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000000000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)UINT64_MAX, dummy_create_cb, dummy_destroy_cb, 0, NULL);

	/* RB_MIN and RB_NEXT should return devs in ascending order by addresses.
	 * RB_FOREACH uses RB_MIN and RB_NEXT internally.
	 */
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x1);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x7FFFFFFF);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x80000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000000000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)UINT64_MAX);

	/* Verify spdk_get_io_channel() creates io_channels associated with the
	 * correct io_devices.
	 */
	ch = spdk_get_io_channel((void *)0x1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x1);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x7FFFFFFF);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x7FFFFFFF);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x80000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x80000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000000000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000000000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)UINT64_MAX);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)UINT64_MAX);
	spdk_put_io_channel(ch);

	/* Let the deferred channel destructions complete before unregistering. */
	poll_threads();

	spdk_io_device_unregister((void *)0x1, NULL);
	spdk_io_device_unregister((void *)0x7FFFFFFF, NULL);
	spdk_io_device_unregister((void *)0x80000000, NULL);
	spdk_io_device_unregister((void *)0x100000000, NULL);
	spdk_io_device_unregister((void *)0x8000000000000000, NULL);
	spdk_io_device_unregister((void *)0x8000000100000000, NULL);
	spdk_io_device_unregister((void *)UINT64_MAX, NULL);

	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
}
1779 
1780 int
1781 main(int argc, char **argv)
1782 {
1783 	CU_pSuite	suite = NULL;
1784 	unsigned int	num_failures;
1785 
1786 	CU_set_error_action(CUEA_ABORT);
1787 	CU_initialize_registry();
1788 
1789 	suite = CU_add_suite("io_channel", NULL, NULL);
1790 
1791 	CU_ADD_TEST(suite, thread_alloc);
1792 	CU_ADD_TEST(suite, thread_send_msg);
1793 	CU_ADD_TEST(suite, thread_poller);
1794 	CU_ADD_TEST(suite, poller_pause);
1795 	CU_ADD_TEST(suite, thread_for_each);
1796 	CU_ADD_TEST(suite, for_each_channel_remove);
1797 	CU_ADD_TEST(suite, for_each_channel_unreg);
1798 	CU_ADD_TEST(suite, thread_name);
1799 	CU_ADD_TEST(suite, channel);
1800 	CU_ADD_TEST(suite, channel_destroy_races);
1801 	CU_ADD_TEST(suite, thread_exit_test);
1802 	CU_ADD_TEST(suite, thread_update_stats_test);
1803 	CU_ADD_TEST(suite, nested_channel);
1804 	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);
1805 	CU_ADD_TEST(suite, cache_closest_timed_poller);
1806 	CU_ADD_TEST(suite, multi_timed_pollers_have_same_expiration);
1807 	CU_ADD_TEST(suite, io_device_lookup);
1808 
1809 	CU_basic_set_mode(CU_BRM_VERBOSE);
1810 	CU_basic_run_tests();
1811 	num_failures = CU_get_number_of_failures();
1812 	CU_cleanup_registry();
1813 	return num_failures;
1814 }
1815