xref: /spdk/test/unit/lib/thread/thread.c/thread_ut.c (revision 6f338d4bf3a8a91b7abe377a605a321ea2b05bf7)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 
8 #include "spdk_cunit.h"
9 
10 #include "thread/thread_internal.h"
11 
12 #include "thread/thread.c"
13 #include "common/lib/ut_multithread.c"
14 
15 static int g_sched_rc = 0;
16 
static int
_thread_schedule(struct spdk_thread *thread)
{
	/* Stub scheduler callback: returns whatever the test preset in
	 * g_sched_rc, letting tests force spdk_thread_create() to succeed
	 * (0) or fail (negative).
	 */
	return g_sched_rc;
}
22 
23 static bool
24 _thread_op_supported(enum spdk_thread_op op)
25 {
26 	switch (op) {
27 	case SPDK_THREAD_OP_NEW:
28 		return true;
29 	default:
30 		return false;
31 	}
32 }
33 
34 static int
35 _thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
36 {
37 	switch (op) {
38 	case SPDK_THREAD_OP_NEW:
39 		return _thread_schedule(thread);
40 	default:
41 		return -ENOTSUP;
42 	}
43 }
44 
/* Verify thread creation and teardown under three library configurations:
 * no scheduler callback, a scheduler callback (spdk_thread_lib_init), and
 * an op callback (spdk_thread_lib_init_ext).  With a scheduler installed,
 * a non-zero scheduler return code must make spdk_thread_create() fail.
 */
static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	/* Exiting is asynchronous - poll until the thread reaches the exited state. */
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0,
				 SPDK_DEFAULT_MSG_MEMPOOL_SIZE);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}
105 
/* Message callback: set the flag pointed to by ctx so the test can observe
 * that the message actually executed.
 */
static void
send_msg_cb(void *ctx)
{
	*(bool *)ctx = true;
}
113 
114 static void
115 thread_send_msg(void)
116 {
117 	struct spdk_thread *thread0;
118 	bool done = false;
119 
120 	allocate_threads(2);
121 	set_thread(0);
122 	thread0 = spdk_get_thread();
123 
124 	set_thread(1);
125 	/* Simulate thread 1 sending a message to thread 0. */
126 	spdk_thread_send_msg(thread0, send_msg_cb, &done);
127 
128 	/* We have not polled thread 0 yet, so done should be false. */
129 	CU_ASSERT(!done);
130 
131 	/*
132 	 * Poll thread 1.  The message was sent to thread 0, so this should be
133 	 *  a nop and done should still be false.
134 	 */
135 	poll_thread(1);
136 	CU_ASSERT(!done);
137 
138 	/*
139 	 * Poll thread 0.  This should execute the message and done should then
140 	 *  be true.
141 	 */
142 	poll_thread(0);
143 	CU_ASSERT(done);
144 
145 	free_threads();
146 }
147 
/* Poller body shared by several tests: record that it executed via the
 * bool pointed to by ctx and return -1.
 */
static int
poller_run_done(void *ctx)
{
	*(bool *)ctx = true;

	return -1;
}
157 
/* Exercise spdk_poller_register()/unregister() with a zero-period poller
 * (runs on every poll) and a 1000us timed poller (runs only after the
 * simulated clock advances by its period).
 */
static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	/* Pin the mocked tick counter so timed-poller arithmetic is deterministic. */
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	/* No simulated time has passed, so the timed poller must not fire yet. */
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	/* The period has not elapsed again - polling now must be a nop. */
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}
203 
/* Context shared between a pause/resume test and its poller callback. */
struct poller_ctx {
	struct spdk_poller	*poller;	/* poller under test */
	bool			run;		/* set by the poller when it executes */
};
208 
209 static int
210 poller_run_pause(void *ctx)
211 {
212 	struct poller_ctx *poller_ctx = ctx;
213 
214 	poller_ctx->run = true;
215 	spdk_poller_pause(poller_ctx->poller);
216 
217 	return 0;
218 }
219 
220 /* Verify the same poller can be switched multiple times between
221  * pause and resume while it runs.
222  */
223 static int
224 poller_run_pause_resume_pause(void *ctx)
225 {
226 	struct poller_ctx *poller_ctx = ctx;
227 
228 	poller_ctx->run = true;
229 
230 	spdk_poller_pause(poller_ctx->poller);
231 	spdk_poller_resume(poller_ctx->poller);
232 	spdk_poller_pause(poller_ctx->poller);
233 
234 	return 0;
235 }
236 
/* Message callback that pauses the poller passed as the message context. */
static void
poller_msg_pause_cb(void *ctx)
{
	spdk_poller_pause((struct spdk_poller *)ctx);
}
244 
/* Message callback that resumes the poller passed as the message context. */
static void
poller_msg_resume_cb(void *ctx)
{
	spdk_poller_resume((struct spdk_poller *)ctx);
}
252 
/* Exercise spdk_poller_pause()/spdk_poller_resume() in many combinations:
 * pollers pausing/resuming themselves from inside their own callback,
 * redundant pause/resume calls, pause/resume issued via thread messages,
 * and unregistering a paused poller - for both zero-period and 1000us
 * timed pollers.
 */
static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* The poller paused itself on its first run, so it must not run again. */
	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Register a poller that switches between pause and resume itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* It ended its run in the paused state, so it must stay paused. */
	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			/* A timed poller additionally needs its period to elapse. */
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that pauses itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that switches between pause and resume itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause,
				    &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}
447 
/* Per-thread callback for spdk_for_each_thread(): bump the shared counter. */
static void
for_each_cb(void *ctx)
{
	++*(int *)ctx;
}
455 
/* Verify spdk_for_each_thread(): the message callback runs once on each of
 * the three threads as each is polled, then the completion callback runs
 * on the originating thread, for a final count of 4.
 */
static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	/* for_each_cb serves both as the per-thread message and the completion. */
	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After each thread is called, the completion calls it
	 * one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}
485 
/* I/O channel create callback: the io_device pointer doubles as a live
 * channel counter, so increment it.  Always succeeds.
 */
static int
channel_create(void *io_device, void *ctx_buf)
{
	++*(int *)io_device;
	return 0;
}
494 
/* I/O channel destroy callback: decrement the live channel counter kept
 * in the io_device pointer.
 */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
	--*(int *)io_device;
}
502 
/* Per-channel callback for spdk_for_each_channel(): count the visit and
 * advance the iterator with status 0.
 */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
	spdk_for_each_channel_continue(i, 0);
}
511 
/* Completion callback for spdk_for_each_channel(): adds one extra count on
 * top of the per-channel visits.  The status argument is ignored.
 */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
}
519 
/* Verify spdk_for_each_channel() when one of the channels is released
 * around the iteration, both before the foreach starts and after it starts
 * but before the owning thread is polled.
 */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	/* The put is deferred to a message, so the channel still exists until polled. */
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	/* Two channel visits plus the completion callback. */
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	/* All three channels were visited before the deferred put, plus completion. */
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}
585 
/* Progress flags for the unregister-during-foreach test. */
struct unreg_ctx {
	bool	ch_done;	/* per-channel callback executed */
	bool	foreach_done;	/* foreach completion callback executed */
};
590 
/* Per-channel callback for the unregister test: record that it ran, check
 * that the iterator still has a current thread, and continue the iteration.
 */
static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}
601 
/* Completion callback for the unregister test: flag that the foreach
 * finished.  The status argument is ignored.
 */
static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}
609 
/* Verify that spdk_io_device_unregister() is deferred while a
 * spdk_for_each_channel() is outstanding on the device, that re-registering
 * the same address while the original device still exists does not add a
 * duplicate, and that the unregister succeeds once the foreach completes.
 */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!RB_EMPTY(&g_io_devices));
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}
660 
/* Verify spdk_thread_get_name(): a thread created with NULL gets an
 * auto-generated name, and a thread created with an explicit name reports
 * exactly that name.
 */
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	/* Exiting is asynchronous - poll until the thread reaches the exited state. */
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}
698 
/* Dummy io_device handles - only their addresses are used as device keys. */
static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

/* Expected channel-context contents for g_device1 and g_device2. */
static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

/* Counters bumped by the create/destroy callbacks below. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;
708 
/* Channel create callback for g_device1: fill the context with g_ctx1 and
 * count the call.
 */
static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}
717 
/* Channel destroy callback for g_device1: verify the context is intact and
 * count the call.
 */
static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}
725 
/* Channel create callback for g_device2: fill the context with g_ctx2 and
 * count the call.
 */
static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}
734 
/* Channel destroy callback for g_device2: verify the context is intact and
 * count the call.
 */
static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}
742 
/* Verify basic I/O channel reference counting: a second get on the same
 * device returns the same channel without re-creating it, the destroy
 * callback fires only when the last reference is put, and getting a
 * channel for an unregistered device returns NULL.
 */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch1) == &g_device1);

	/* A second get on the same device reuses the existing channel. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device1);

	/* Putting one of two references must not destroy the channel. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device2);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* g_device3 was never registered, so no channel can be created for it. */
	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
804 
/* Create callback for the destroy-races test: the channel context doubles
 * as a refcount that must be 0 on entry and is set to 1.
 */
static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}
815 
/* Destroy callback for the destroy-races test: the refcount in the context
 * must still be 1 (i.e. not destroyed twice) and is reset to 0.
 */
static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}
824 
/**
 * This test is checking that a sequence of get, put, get, put without allowing
 * the deferred put operation to complete doesn't result in releasing the memory
 * for the channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	/* The put defers the destroy to a message - deliberately do not poll yet. */
	spdk_put_io_channel(ch);

	/* Re-get the channel before the deferred destroy has had a chance to run. */
	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
859 
/* Verify spdk_thread_exit() semantics: pending messages are still reaped
 * while exiting, outstanding I/O channel releases and pollers keep the
 * thread in the exiting state until they complete, and a thread that never
 * finishes cleanly is forced to the exited state after the exit timeout.
 */
static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

	/* Mock the clock so the exit timeout can be driven deterministically. */
	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);

	allocate_threads(4);

	/* Test if all pending messages are reaped for the exiting thread, and the
	 * thread moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending message to thread 0 will be still accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test releasing I/O channel is reaped even after the thread moves to
	 * the exiting state
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because I/O channel release
	 * does not complete yet.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 will be able to get the another reference of I/O channel
	 * even after the thread moves to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel
	 * is released.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test if unregistering poller is reaped for the exiting thread, and the
	 * thread moves to the exited thread.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);

	poll_threads();

	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test if the exiting thread is exited forcefully after timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Cause timeout forcefully. */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}
1015 
/* Poller that consumes the number of simulated microseconds encoded in ctx
 * and reports idle (returns 0).
 */
static int
poller_run_idle(void *ctx)
{
	spdk_delay_us((uint64_t)ctx);

	return 0;
}
1025 
/* Poller that consumes the number of simulated microseconds encoded in ctx
 * and reports busy (returns 1).
 */
static int
poller_run_busy(void *ctx)
{
	spdk_delay_us((uint64_t)ctx);

	return 1;
}
1035 
/* Verify that a thread's busy/idle TSC statistics are accounted correctly.
 * The asserted values imply the unit-test helpers advance the mocked tick
 * counter 1:1 with spdk_delay_us() microseconds (see ut_multithread.c), so
 * ticks and microseconds are interchangeable here.
 */
static void
thread_update_stats_test(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test if idle_tsc is updated expectedly. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	/* 10 (start) + 100 (delay) + 1000 (idle poller) = 1110 */
	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test if busy_tsc is updated expectedly. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	/* 2210 + 10000 (delay) + 100000 (busy poller) = 112210 */
	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}
1101 
/* Channel context for the nested-channel test: holds a channel to the
 * child io_device (NULL at the innermost level) plus a per-channel poller.
 */
struct ut_nested_ch {
	struct spdk_io_channel *child;
	struct spdk_poller *poller;
};

/* io_device forming a singly linked parent -> child chain. */
struct ut_nested_dev {
	struct ut_nested_dev *child;
};
1110 
/* Poller that performs no work; always returns -1. */
static int
ut_null_poll(void *ctx)
{
	return -1;
}
1116 
1117 static int
1118 ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
1119 {
1120 	struct ut_nested_ch *_ch = ctx_buf;
1121 	struct ut_nested_dev *_dev = io_device;
1122 	struct ut_nested_dev *_child;
1123 
1124 	_child = _dev->child;
1125 
1126 	if (_child != NULL) {
1127 		_ch->child = spdk_get_io_channel(_child);
1128 		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
1129 	} else {
1130 		_ch->child = NULL;
1131 	}
1132 
1133 	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
1134 	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);
1135 
1136 	return 0;
1137 }
1138 
1139 static void
1140 ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
1141 {
1142 	struct ut_nested_ch *_ch = ctx_buf;
1143 	struct spdk_io_channel *child;
1144 
1145 	child = _ch->child;
1146 	if (child != NULL) {
1147 		spdk_put_io_channel(child);
1148 	}
1149 
1150 	spdk_poller_unregister(&_ch->poller);
1151 }
1152 
/* Assert the state of a freshly created nested channel: one channel
 * reference, correct device back-pointer, one device reference.
 */
static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}
1160 
/* Assert the state of a nested channel whose destroy has been queued but
 * not yet completed: no live references, one pending destroy, device still
 * referenced.
 */
static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}
1168 
/* Assert that a device holds no references once its channel destroy completed. */
static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}
1174 
/* Assert that a poller registration during nested channel creation succeeded. */
static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}
1180 
/* Verify that nested I/O channels, i.e. channels whose create callback in
 * turn gets a channel to a child device, are created and destroyed
 * recursively, and that thread exit only completes after every nested
 * channel has been released.
 */
static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/* Chain the devices: dev1 -> dev2 -> dev3. */
	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	/* Look up the internal io_device structs so their refcnts can be
	 * inspected by the ut_check_nested_* helpers below.
	 */
	dev1 = io_device_get(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = io_device_get(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = io_device_get(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single call spdk_get_io_channel() to dev1 will also create channels
	 * to dev2 and dev3 continuously. Pollers will be registered together.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	/* Walk the chain of per-channel contexts built by the create callbacks. */
	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single call spdk_put_io_channel() to dev1 will also destroy channels
	 * to dev2 and dev3 continuously. Pollers will be unregistered together.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after unregistering the non-nested
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	/* Each poll_thread_times(0, 1) below completes one level of deferred
	 * channel destruction: ch1 first, then ch2, then ch3.
	 */
	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	/* Only after the last nested channel is gone may the thread finish exiting. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
1282 
static int
create_cb2(void *io_device, void *ctx_buf)
{
	/* The io_device pointer is a bare counter tracking how many channel
	 * contexts currently exist for the device.
	 */
	uint64_t *count = io_device;

	(*count)++;

	return 0;
}
1292 
1293 static void
1294 destroy_cb2(void *io_device, void *ctx_buf)
1295 {
1296 	uint64_t *devcnt = (uint64_t *)io_device;
1297 
1298 	CU_ASSERT(*devcnt > 0);
1299 	*devcnt -= 1;
1300 }
1301 
1302 static void
1303 unregister_cb2(void *io_device)
1304 {
1305 	uint64_t *devcnt = (uint64_t *)io_device;
1306 
1307 	CU_ASSERT(*devcnt == 0);
1308 }
1309 
/* Verify the race between io_device unregistration and thread exit: a thread
 * must not complete its exit until every channel it holds, and any device
 * unregistration it initiated, have been released/finished.
 */
static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;	/* doubles as the io_device key and as a context counter */
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads and each thread gets a channel from the same device. */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state; it must not complete its exit
	 * until both channels and the device itself are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	/* Thread 0 starts the unregister, but thread 1 still holds ch2. */
	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state; it must not complete its exit
	 * until its channel is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	/* Releasing the last channel lets thread 2 exit immediately ... */
	spdk_put_io_channel(ch2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	/* ... while thread 1 still needs one more poll to finish the pending
	 * device unregistration before it may exit.
	 */
	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}
1374 
1375 static int
1376 dummy_poller(void *arg)
1377 {
1378 	return SPDK_POLLER_IDLE;
1379 }
1380 
/* Verify that thread->first_timed_poller always caches the timed poller with
 * the earliest expiration, including across unregister and pause, both of
 * which only take effect once the affected poller expires.
 */
static void
cache_closest_timed_poller(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *tmp;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/* Three timed pollers with distinct periods: 1000, 1500, 1800 usec. */
	poller1 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1800);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poll_threads();

	/* When multiple timed pollers are inserted, the cache should
	 * have the closest timed poller.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller2);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller2);

	/* If we unregister a timed poller by spdk_poller_unregister()
	 * when it is waiting, it is marked as being unregistered and
	 * is actually unregistered when it is expired.
	 *
	 * Hence if we unregister the closest timed poller when it is waiting,
	 * the cache is not updated to the next timed poller until it is expired.
	 */
	tmp = poller2;

	spdk_poller_unregister(&poller2);
	CU_ASSERT(poller2 == NULL);

	/* One tick before poller2's expiration: it is still cached. */
	spdk_delay_us(499);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == tmp);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	/* If we pause a timed poller by spdk_poller_pause() when it is waiting,
	 * it is marked as being paused and is actually paused when it is expired.
	 *
	 * Hence if we pause the closest timed poller when it is waiting, the cache
	 * is not updated to the next timed poller until it is expired.
	 */
	spdk_poller_pause(poller3);

	/* One tick before poller3's expiration: it is still cached. */
	spdk_delay_us(299);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	/* After unregistering all timed pollers, the cache should
	 * be NULL.
	 */
	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller3);

	spdk_delay_us(200);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}
1474 
/* Verify the first_timed_poller cache when several timed pollers share the
 * same next_run_tick, and when pollers are unregistered while others with
 * the same expiration remain.
 */
static void
multi_timed_pollers_have_same_expiration(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *poller4, *tmp;
	uint64_t start_ticks;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/*
	 * case 1: multiple timed pollers have the same next_run_tick.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poller4 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller4 != NULL);

	/* poller1 and poller2 have the same next_run_tick but cache has poller1
	 * because poller1 is registered earlier than poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1 and poller2 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller3 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	/* poller1, poller2, and poller4 have the same next_run_tick but cache
	 * has poller4 because poller4 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller4);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller4 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is updated earlier than poller1 and poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 3000);

	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller2);
	spdk_poller_unregister(&poller3);
	spdk_poller_unregister(&poller4);

	/* Advance past poller4's next expiration so every poller is reaped. */
	spdk_delay_us(1500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 3000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	/*
	 * case 2: unregister timed pollers while multiple timed pollers are registered.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);

	/* after 250 usec, register poller2 and poller3. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 250);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 750);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* unregister poller2 which is not the closest. */
	tmp = poller2;
	spdk_poller_unregister(&poller2);

	/* after 250 usec, poller1 is expired. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller2 is not unregistered yet because it is not expired. */
	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(tmp->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* After another 250 usec, poller2 expires and is finally removed. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 750);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_poller_unregister(&poller3);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);

	spdk_poller_unregister(&poller1);

	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}
1637 
static int
dummy_create_cb(void *io_device, void *ctx_buf)
{
	(void)io_device;
	(void)ctx_buf;

	/* Nothing to initialize; the lookup test only needs channels to exist. */
	return 0;
}
1643 
static void
dummy_destroy_cb(void *io_device, void *ctx_buf)
{
	/* Channel contexts in the lookup test carry no state to release. */
	(void)io_device;
	(void)ctx_buf;
}
1648 
1649 /* We had a bug that the compare function for the io_device tree
1650  * did not work as expected because subtraction caused overflow
1651  * when the difference between two keys was more than 32 bits.
1652  * This test case verifies the fix for the bug.
1653  */
static void
io_device_lookup(void)
{
	struct io_device dev1, dev2, *dev;
	struct spdk_io_channel *ch;

	/* The compare function io_device_cmp() had a overflow bug.
	 * Verify the fix first.
	 */
	dev1.io_device = (void *)0x7FFFFFFF;
	dev2.io_device = NULL;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	/* Check if overflow due to 32 bits does not occur. */
	dev1.io_device = (void *)0x80000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x100000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x8000000000000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	allocate_threads(1);
	set_thread(0);

	/* Register io_devices whose keys straddle the 32-bit and sign
	 * boundaries, so tree ordering would break if comparison truncated
	 * or overflowed.
	 */
	spdk_io_device_register((void *)0x1, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x7FFFFFFF, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x80000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000000000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)UINT64_MAX, dummy_create_cb, dummy_destroy_cb, 0, NULL);

	/* RB_MIN and RB_NEXT should return devs in ascending order by addresses.
	 * RB_FOREACH uses RB_MIN and RB_NEXT internally.
	 */
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x1);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x7FFFFFFF);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x80000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000000000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)UINT64_MAX);

	/* Verify spdk_get_io_channel() creates io_channels associated with the
	 * correct io_devices.
	 */
	ch = spdk_get_io_channel((void *)0x1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x1);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x7FFFFFFF);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x7FFFFFFF);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x80000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x80000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000000000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000000000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)UINT64_MAX);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)UINT64_MAX);
	spdk_put_io_channel(ch);

	/* Let the deferred channel destructions complete before unregistering. */
	poll_threads();

	spdk_io_device_unregister((void *)0x1, NULL);
	spdk_io_device_unregister((void *)0x7FFFFFFF, NULL);
	spdk_io_device_unregister((void *)0x80000000, NULL);
	spdk_io_device_unregister((void *)0x100000000, NULL);
	spdk_io_device_unregister((void *)0x8000000000000000, NULL);
	spdk_io_device_unregister((void *)0x8000000100000000, NULL);
	spdk_io_device_unregister((void *)UINT64_MAX, NULL);

	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
}
1777 
1778 int
1779 main(int argc, char **argv)
1780 {
1781 	CU_pSuite	suite = NULL;
1782 	unsigned int	num_failures;
1783 
1784 	CU_set_error_action(CUEA_ABORT);
1785 	CU_initialize_registry();
1786 
1787 	suite = CU_add_suite("io_channel", NULL, NULL);
1788 
1789 	CU_ADD_TEST(suite, thread_alloc);
1790 	CU_ADD_TEST(suite, thread_send_msg);
1791 	CU_ADD_TEST(suite, thread_poller);
1792 	CU_ADD_TEST(suite, poller_pause);
1793 	CU_ADD_TEST(suite, thread_for_each);
1794 	CU_ADD_TEST(suite, for_each_channel_remove);
1795 	CU_ADD_TEST(suite, for_each_channel_unreg);
1796 	CU_ADD_TEST(suite, thread_name);
1797 	CU_ADD_TEST(suite, channel);
1798 	CU_ADD_TEST(suite, channel_destroy_races);
1799 	CU_ADD_TEST(suite, thread_exit_test);
1800 	CU_ADD_TEST(suite, thread_update_stats_test);
1801 	CU_ADD_TEST(suite, nested_channel);
1802 	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);
1803 	CU_ADD_TEST(suite, cache_closest_timed_poller);
1804 	CU_ADD_TEST(suite, multi_timed_pollers_have_same_expiration);
1805 	CU_ADD_TEST(suite, io_device_lookup);
1806 
1807 	CU_basic_set_mode(CU_BRM_VERBOSE);
1808 	CU_basic_run_tests();
1809 	num_failures = CU_get_number_of_failures();
1810 	CU_cleanup_registry();
1811 	return num_failures;
1812 }
1813