/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "thread/thread_internal.h"

#include "thread/thread.c"
#include "common/lib/ut_multithread.c"

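/* Return code for the mocked scheduler callbacks below.  Tests set this to 0
 * (scheduling succeeds) or -1 (scheduling fails) before calling
 * spdk_thread_create().
 */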
static int g_sched_rc = 0;

static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}

static bool
_thread_op_supported(enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return true;
	default:
		return false;
	}
}

static int
_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return _thread_schedule(thread);
	default:
		return -ENOTSUP;
	}
}

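/* Exercise thread creation and teardown in three configurations: no scheduler
 * callback, a scheduler registered via spdk_thread_lib_init(), and a scheduler
 * registered via the extended spdk_thread_lib_init_ext() API.
 */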
static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0,
				 SPDK_DEFAULT_MSG_MEMPOOL_SIZE);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}

static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}

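/* Poller callback used by several tests below: records that it ran via the
 * bool pointed to by ctx.
 */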
static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}

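/* Context shared between a poller and its callback so that the callback can
 * pause or resume the very poller that invoked it.
 */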
struct poller_ctx {
	struct spdk_poller	*poller;
	bool			run;
};

static int
poller_run_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

/* Verify the same poller can be switched multiple times between
 * pause and resume while it runs.
 */
static int
poller_run_pause_resume_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;

	spdk_poller_pause(poller_ctx->poller);
	spdk_poller_resume(poller_ctx->poller);
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

static void
poller_msg_pause_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_pause(poller);
}

static void
poller_msg_resume_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_resume(poller);
}

static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Register a poller that switches between pause and resume itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that pauses itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that switches between pause and resume itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause,
				    &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}

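/* Callback for spdk_for_each_thread(): increments the shared counter once per
 * thread, and once more when reused as the completion callback.
 */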
static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After every thread has been polled, the completion callback runs on
	 *  the original thread and increments count one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}

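/* Channel create/destroy callbacks that track the number of live channels in
 * the int that doubles as the io_device pointer.
 */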
static int
channel_create(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)++;
	return 0;
}

static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)--;
}

static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
	spdk_for_each_channel_continue(i, 0);
}

static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
}

static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in several different, sometimes non-intuitive, orders because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}

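/* Context for the unregister-during-foreach test below: records that the
 * per-channel callback and the foreach completion both ran.
 */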
struct unreg_ctx {
	bool	ch_done;
	bool	foreach_done;
};

static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}

static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!RB_EMPTY(&g_io_devices));
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);
	ch0 = spdk_get_io_channel(&io_target);

	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);

	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);

	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);
	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have immediately removed the device.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);

	/*
	 * There are no more foreach operations outstanding, so the device should be
	 * unregistered.
	 */
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}

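/* Fixtures for the channel() test: two io_devices that get registered, one
 * (g_device3) that never does, and per-device context magic values that the
 * create/destroy callbacks check.
 */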
static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}

static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}

static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch1) == &g_device1);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device2);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}

static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}

/**
 * This test checks that a sequence of get, put, get, put, without allowing the
 * deferred put operation to complete in between, doesn't result in releasing
 * the memory for the channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

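/* Exercise the exiting -> exited transitions: pending messages are still
 * reaped, outstanding I/O channels and pollers keep the thread in the exiting
 * state, and a stuck thread is forced to exit once the timeout elapses.
 */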
static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);

	allocate_threads(4);

	/* Test that all pending messages are reaped for the exiting thread, and that the
	 * thread moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending a message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending a message to thread 0 will still be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test that releasing an I/O channel is reaped even after the thread moves to
	 * the exiting state.
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because the I/O channel
	 * release has not completed yet.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 will be able to get another reference to the I/O channel
	 * even after the thread moves to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel is released. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test that unregistering a poller is reaped for the exiting thread, and that the
	 * thread moves to the exited state.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);

	poll_threads();

	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test that the exiting thread is exited forcefully after the timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Cause the timeout forcefully. */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}

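/* Pollers that consume the given number of mocked microseconds and then report
 * themselves as idle (return 0) or busy (return 1), used to drive the
 * idle_tsc/busy_tsc accounting below.
 */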
static int
poller_run_idle(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 0;
}

static int
poller_run_busy(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 1;
}

static void
thread_update_stats_test(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test that idle_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test that busy_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}

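/* Fixtures for nested_channel(): each io_device points at a child device, and
 * each channel's create callback grabs a channel on that child, so creation and
 * destruction recurse down the dev1 -> dev2 -> dev3 chain.
 */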
struct ut_nested_ch {
	struct spdk_io_channel *child;
	struct spdk_poller *poller;
};

struct ut_nested_dev {
	struct ut_nested_dev *child;
};

static int
ut_null_poll(void *ctx)
{
	return SPDK_POLLER_IDLE;
}

static int
ut_assert_poller_state_running(void *ctx)
{
	struct spdk_poller *poller = ctx;

	CU_ASSERT_STRING_EQUAL(spdk_poller_get_state_str(poller), "running");
	return SPDK_POLLER_IDLE;
}

static int
ut_busy_poll(void *ctx)
{
	return SPDK_POLLER_BUSY;
}

static int
ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct ut_nested_dev *_dev = io_device;
	struct ut_nested_dev *_child;

	_child = _dev->child;

	if (_child != NULL) {
		_ch->child = spdk_get_io_channel(_child);
		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
	} else {
		_ch->child = NULL;
	}

	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);

	return 0;
}

static void
ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct spdk_io_channel *child;

	child = _ch->child;
	if (child != NULL) {
		spdk_put_io_channel(child);
	}

	spdk_poller_unregister(&_ch->poller);
}

static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}

static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}

static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	dev1 = io_device_get(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = io_device_get(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = io_device_get(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single call to spdk_get_io_channel() for dev1 will also create channels
	 * to dev2 and dev3 recursively. Pollers will be registered together.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single call to spdk_put_io_channel() for ch1 will also destroy the channels
	 * to dev2 and dev3 recursively. Pollers will be unregistered together.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after releasing the outermost
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	*devcnt += 1;

	return 0;
}

static void
destroy_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt > 0);
	*devcnt -= 1;
}

static void
unregister_cb2(void *io_device)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt == 0);
}

static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads and each thread gets a channel from the same device. */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state, but it should stay in the exiting state
	 * until both channels and the device are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state, but it should stay in the exiting state
	 * until its channel is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	spdk_put_io_channel(ch2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}

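/* Poller that does nothing, used to populate the timed-poller tree in the
 * tests below.
 */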
static int
dummy_poller(void *arg)
{
	return SPDK_POLLER_IDLE;
}

static void
cache_closest_timed_poller(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *tmp;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	poller1 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1800);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poll_threads();

	/* When multiple timed pollers are inserted, the cache should
	 * have the closest timed poller.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller2);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller2);

	/* If we unregister a timed poller by spdk_poller_unregister()
	 * when it is waiting, it is marked as being unregistered and
	 * is actually unregistered when it is expired.
	 *
	 * Hence if we unregister the closest timed poller when it is waiting,
	 * the cache is not updated to the next timed poller until it is expired.
	 */
	tmp = poller2;

	spdk_poller_unregister(&poller2);
	CU_ASSERT(poller2 == NULL);

	spdk_delay_us(499);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == tmp);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	/* If we pause a timed poller by spdk_poller_pause() when it is waiting,
	 * it is marked as being paused and is actually paused when it is expired.
	 *
	 * Hence if we pause the closest timed poller when it is waiting, the cache
	 * is not updated to the next timed poller until it is expired.
	 */
	spdk_poller_pause(poller3);

	spdk_delay_us(299);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	/* After unregistering all timed pollers, the cache should
	 * be NULL.
	 */
	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller3);

	spdk_delay_us(200);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}

static void
multi_timed_pollers_have_same_expiration(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *poller4, *tmp;
	uint64_t start_ticks;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/*
	 * case 1: multiple timed pollers have the same next_run_tick.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poller4 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller4 != NULL);

	/* poller1 and poller2 have the same next_run_tick but cache has poller1
	 * because poller1 is registered earlier than poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1 and poller2 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller3 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	/* poller1, poller2, and poller4 have the same next_run_tick but cache
	 * has poller4 because poller4 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller4);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller4 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is updated earlier than poller1 and poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 3000);

	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller2);
	spdk_poller_unregister(&poller3);
	spdk_poller_unregister(&poller4);

	spdk_delay_us(1500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 3000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	/*
	 * case 2: unregister timed pollers while multiple timed pollers are registered.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);

	/* after 250 usec, register poller2 and poller3. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 250);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 750);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* unregister poller2 which is not the closest. */
	tmp = poller2;
	spdk_poller_unregister(&poller2);

	/* after 250 usec, poller1 is expired. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller2 is not unregistered yet because it is not expired. */
	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(tmp->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 750);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_poller_unregister(&poller3);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);

	spdk_poller_unregister(&poller1);

	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}

static int
dummy_create_cb(void *io_device, void *ctx_buf)
{
	return 0;
}

static void
dummy_destroy_cb(void *io_device, void *ctx_buf)
{
}

/* We had a bug where the compare function for the io_device tree
 * did not work as expected because subtraction overflowed
 * when the difference between two keys was more than 32 bits.
 * This test case verifies the fix for the bug.
 */
static void
io_device_lookup(void)
{
	struct io_device dev1, dev2, *dev;
	struct spdk_io_channel *ch;

	/* The compare function io_device_cmp() had an overflow bug.
	 * Verify the fix first.
	 */
	dev1.io_device = (void *)0x7FFFFFFF;
	dev2.io_device = NULL;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	/* Check that overflow due to 32 bits does not occur. */
	dev1.io_device = (void *)0x80000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x100000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x8000000000000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register((void *)0x1, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x7FFFFFFF, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x80000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000000000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)UINT64_MAX, dummy_create_cb, dummy_destroy_cb, 0, NULL);

	/* RB_MIN and RB_NEXT should return devs in ascending order by addresses.
	 * RB_FOREACH uses RB_MIN and RB_NEXT internally.
	 */
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x1);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x7FFFFFFF);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x80000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000000000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)UINT64_MAX);

	/* Verify that spdk_get_io_channel() creates io_channels associated with the
	 * correct io_devices.
	 */
	ch = spdk_get_io_channel((void *)0x1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x1);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x7FFFFFFF);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x7FFFFFFF);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x80000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x80000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000000000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000000000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)UINT64_MAX);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)UINT64_MAX);
	spdk_put_io_channel(ch);

	poll_threads();

	spdk_io_device_unregister((void *)0x1, NULL);
	spdk_io_device_unregister((void *)0x7FFFFFFF, NULL);
	spdk_io_device_unregister((void *)0x80000000, NULL);
	spdk_io_device_unregister((void *)0x100000000, NULL);
	spdk_io_device_unregister((void *)0x8000000000000000, NULL);
	spdk_io_device_unregister((void *)0x8000000100000000, NULL);
	spdk_io_device_unregister((void *)UINT64_MAX, NULL);

	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
}

static enum spin_error g_spin_err;
static uint32_t g_spin_err_count = 0;

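/* Installed in place of the spinlock abort handler so that expected misuse is
 * recorded in g_spin_err/g_spin_err_count instead of aborting the test binary.
 */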
static void
ut_track_abort(enum spin_error err)
{
	g_spin_err = err;
	g_spin_err_count++;
}

static void
spdk_spin(void)
{
	struct spdk_spinlock lock;

	g_spin_abort_fn = ut_track_abort;

	/* Do not need to be on an SPDK thread to initialize an spdk_spinlock */
	g_spin_err_count = 0;
	spdk_spin_init(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Trying to take a lock while not on an SPDK thread is an error */
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_NOT_SPDK_THREAD);

	/* Trying to check if a lock is held while not on an SPDK thread is an error */
	g_spin_err_count = 0;
	spdk_spin_held(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_NOT_SPDK_THREAD);

	/* Do not need to be on an SPDK thread to destroy an spdk_spinlock */
	g_spin_err_count = 0;
	spdk_spin_destroy(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	allocate_threads(2);
	set_thread(0);

	/* Can initialize an spdk_spinlock on an SPDK thread */
	g_spin_err_count = 0;
	spdk_spin_init(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Can take spinlock */
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Can release spinlock */
	g_spin_err_count = 0;
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Deadlock detected */
	g_spin_err_count = 0;
	g_spin_err = SPIN_ERR_NONE;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_DEADLOCK);

	/* Cannot unlock from wrong thread */
	set_thread(1);
	g_spin_err_count = 0;
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_WRONG_THREAD);

	/* Get back to a known good state */
	set_thread(0);
	g_spin_err_count = 0;
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Cannot release the same lock twice */
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_WRONG_THREAD);

	/* A lock that is not held is properly recognized */
	g_spin_err_count = 0;
	CU_ASSERT(!spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);

	/* A lock that is held is recognized as held by only the thread that holds it. */
	set_thread(1);
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	CU_ASSERT(spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);
	set_thread(0);
	CU_ASSERT(!spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);

	/* After releasing, no one thinks it is held */
	set_thread(1);
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	CU_ASSERT(!spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);
	set_thread(0);
	CU_ASSERT(!spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);

	/* Destroying a lock that is held is an error. */
	set_thread(0);
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	spdk_spin_destroy(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_LOCK_HELD);
	g_spin_err_count = 0;
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Clean up */
	g_spin_err_count = 0;
	spdk_spin_destroy(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	free_threads();
	g_spin_abort_fn = __posix_abort;
}

1930 static void
1931 for_each_channel_and_thread_exit_race(void)
1932 {
1933 	struct spdk_io_channel *ch1, *ch2;
1934 	struct spdk_thread *thread0;
1935 	int ch_count = 0;
1936 	int msg_count = 0;
1937 
1938 	allocate_threads(3);
1939 	set_thread(0);
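	/* channel_create() and channel_destroy() adjust ch_count, so the test can track how many channels exist. */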
1940 	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
1941 	set_thread(1);
1942 	ch1 = spdk_get_io_channel(&ch_count);
1943 	set_thread(2);
1944 	ch2 = spdk_get_io_channel(&ch_count);
1945 	CU_ASSERT(ch_count == 2);
1946 
1947 	/*
1948 	 * Test one race condition between spdk_thread_exit() and spdk_for_each_channel().
1949 	 *
1950 	 * thread 0 does not have an io_channel and calls spdk_thread_exit() immediately
1951 	 * after spdk_for_each_channel(). In this case, thread 0 should exit after
1952 	 * spdk_for_each_channel() completes.
1953 	 */
1954 
1955 	set_thread(0);
1956 	thread0 = spdk_get_thread();
1957 
1958 	CU_ASSERT(thread0->for_each_count == 0);
1959 
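	/* channel_msg() runs once per channel and channel_cpl() once on completion; each increments msg_count, giving a final count of 3. */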
1960 	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
1961 	CU_ASSERT(msg_count == 0);
1962 	CU_ASSERT(thread0->for_each_count == 1);
1963 	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_RUNNING);
1964 
1965 	spdk_thread_exit(thread0);
1966 	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_EXITING);
1967 
1968 	poll_threads();
1969 	CU_ASSERT(msg_count == 3);
1970 	CU_ASSERT(thread0->for_each_count == 0);
1971 	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_EXITED);
1972 
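	/* Release the channels; the destroy callbacks do not run until the owning threads are polled. */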
1973 	set_thread(1);
1974 	spdk_put_io_channel(ch1);
1975 	CU_ASSERT(ch_count == 2);
1976 	set_thread(2);
1977 	spdk_put_io_channel(ch2);
1978 	CU_ASSERT(ch_count == 2);
1979 	poll_threads();
1980 	CU_ASSERT(ch_count == 0);
1981 
1982 	spdk_io_device_unregister(&ch_count, NULL);
1983 	poll_threads();
1984 
1985 	free_threads();
1986 }
1987 
1988 static void
1989 for_each_thread_and_thread_exit_race(void)
1990 {
1991 	struct spdk_thread *thread0;
1992 	int count = 0;
1993 	int i;
1994 
1995 	allocate_threads(3);
1996 	set_thread(0);
1997 	thread0 = spdk_get_thread();
1998 
1999 	/* Even if thread 0 starts exiting, spdk_for_each_thread() should complete normally
2000 	 * and then thread 0 should be moved to EXITED.
2001 	 */
2002 
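	/* for_each_cb serves as both the per-thread message and the completion callback; each invocation increments count. */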
2003 	spdk_for_each_thread(for_each_cb, &count, for_each_cb);
2004 	CU_ASSERT(thread0->for_each_count == 1);
2005 	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_RUNNING);
2006 
2007 	spdk_thread_exit(thread0);
2008 	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_EXITING);
2009 
2010 	/* We have not polled thread 0 yet, so count should be 0 */
2011 	CU_ASSERT(count == 0);
2012 
2013 	/* Poll each thread to verify the message is passed to each */
2014 	for (i = 0; i < 3; i++) {
2015 		poll_thread(i);
2016 		CU_ASSERT(count == (i + 1));
2017 	}
2018 
2019 	/*
2020 	 * After the callback has run on each thread, the completion
2021 	 * invokes it one more time on the thread that started the iteration.
2022 	 */
2023 	poll_thread(0);
2024 	CU_ASSERT(count == 4);
2025 
2026 	CU_ASSERT(thread0->for_each_count == 0);
2027 	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_EXITED);
2028 
2029 	free_threads();
2030 }
2031 
2032 static void
2033 poller_get_name(void)
2034 {
2035 	struct spdk_poller *named_poller = NULL;
2036 	struct spdk_poller *unnamed_poller = NULL;
2037 	char ut_null_poll_addr[15];
2038 
2039 	allocate_threads(1);
2040 	set_thread(0);
2041 
2042 	/* Register a named and unnamed poller */
2043 	named_poller = spdk_poller_register_named(ut_null_poll, NULL, 0, "name");
2044 	unnamed_poller = spdk_poller_register(ut_null_poll, NULL, 0);
2045 
2046 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_name(named_poller), "name");
2047 
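	/* An unnamed poller's name defaults to the poller function's address, formatted with %p. */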
2048 	snprintf(ut_null_poll_addr, sizeof(ut_null_poll_addr), "%p", ut_null_poll);
2049 
2050 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_name(unnamed_poller), ut_null_poll_addr);
2051 
2052 	spdk_poller_unregister(&named_poller);
2053 	spdk_poller_unregister(&unnamed_poller);
2054 	free_threads();
2055 }
2056 
2057 static void
2058 poller_get_id(void)
2059 {
2060 	struct spdk_poller *pollers[3];
2061 	uint64_t poller_expected_id[3] = {1, 2, 3};
2062 	int i;
2063 
2064 	allocate_threads(1);
2065 	set_thread(0);
2066 
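	/* Poller IDs are expected to be assigned sequentially, starting at 1. */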
2067 	for (i = 0; i < 3; i++) {
2068 		pollers[i] = spdk_poller_register(ut_null_poll, NULL, 0);
2069 	}
2070 
2071 	for (i = 0; i < 3; i++) {
2072 		CU_ASSERT_EQUAL(spdk_poller_get_id(pollers[i]), poller_expected_id[i]);
2073 		spdk_poller_unregister(&pollers[i]);
2074 	}
2075 
2076 	free_threads();
2077 }
2078 
2079 static void
2080 poller_get_state_str(void)
2081 {
2082 	struct spdk_poller *poller = NULL;
2083 
2084 	allocate_threads(1);
2085 	set_thread(0);
2086 
2087 	poller = spdk_poller_register(ut_assert_poller_state_running, NULL, 0);
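	/* Pass the poller to its own callback so ut_assert_poller_state_running() can check the state from inside the poll. */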
2088 	poller->arg = poller;
2089 
2090 	/* Assert poller begins in "waiting" state */
2091 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_state_str(poller), "waiting");
2092 
2093 	/* Assert poller state changes to "running" while being polled and returns to "waiting" */
2094 	poll_thread(0);
2095 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_state_str(poller), "waiting");
2096 
2097 	/* Assert poller transitions through "pausing" to "paused" and remains "paused" */
2098 	spdk_poller_pause(poller);
2099 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_state_str(poller), "pausing");
2100 	poll_thread(0);
2101 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_state_str(poller), "paused");
2102 	poll_thread(0);
2103 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_state_str(poller), "paused");
2104 
2105 	/* Assert poller state returns to "waiting" after being resumed */
2106 	spdk_poller_resume(poller);
2107 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_state_str(poller), "waiting");
2108 	poll_thread(0);
2109 	CU_ASSERT_STRING_EQUAL(spdk_poller_get_state_str(poller), "waiting");
2110 
2111 	spdk_poller_unregister(&poller);
2112 	free_threads();
2113 }
2114 
2115 static void
2116 poller_get_period_ticks(void)
2117 {
2118 	struct spdk_poller *poller_1 = NULL;
2119 	struct spdk_poller *poller_2 = NULL;
2120 	uint64_t period_1 = 0;
2121 	uint64_t period_2 = 10;
2122 
2123 	allocate_threads(1);
2124 	set_thread(0);
2125 
2126 	/* Register poller_1 with 0 us period and poller_2 with non-zero period */
2127 	poller_1 = spdk_poller_register(ut_null_poll, NULL, period_1);
2128 	poller_2 = spdk_poller_register(ut_null_poll, NULL, period_2);
2129 
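	/* Assumption: the unit-test clock is mocked at one tick per microsecond, so period ticks equal the microsecond periods. */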
2130 	CU_ASSERT_EQUAL(spdk_poller_get_period_ticks(poller_1), period_1);
2131 	CU_ASSERT_EQUAL(spdk_poller_get_period_ticks(poller_2), period_2);
2132 
2133 	spdk_poller_unregister(&poller_1);
2134 	spdk_poller_unregister(&poller_2);
2135 	free_threads();
2136 }
2137 
2138 static void
2139 poller_get_stats(void)
2140 {
2141 	struct spdk_poller *idle_poller = NULL;
2142 	struct spdk_poller *busy_poller = NULL;
2143 	struct spdk_poller_stats stats;
2144 	int period = 5;
2145 
2146 	allocate_threads(1);
2147 	set_thread(0);
2148 
2149 	/* Register a "busy" and "idle" poller */
2150 	idle_poller = spdk_poller_register(ut_null_poll, NULL, period);
2151 	busy_poller = spdk_poller_register(ut_busy_poll, NULL, period);
2152 
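	/* Advance the mock clock by one period so both timed pollers become due and run exactly once. */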
2153 	spdk_delay_us(period);
2154 	poll_thread(0);
2155 
2156 	/* Check busy poller stats */
2157 	spdk_poller_get_stats(busy_poller, &stats);
2158 	CU_ASSERT_EQUAL(stats.run_count, 1);
2159 	CU_ASSERT_EQUAL(stats.busy_count, 1);
2160 
2161 	memset(&stats, 0, sizeof(stats));
2162 	/* Check idle poller stats */
2163 	spdk_poller_get_stats(idle_poller, &stats);
2164 	CU_ASSERT_EQUAL(stats.run_count, 1);
2165 	CU_ASSERT_EQUAL(stats.busy_count, 0);
2166 
2167 	spdk_poller_unregister(&idle_poller);
2168 	spdk_poller_unregister(&busy_poller);
2169 	free_threads();
2170 }
2171 
2173 int
2174 main(int argc, char **argv)
2175 {
2176 	CU_pSuite	suite = NULL;
2177 	unsigned int	num_failures;
2178 
2179 	CU_initialize_registry();
2180 
2181 	suite = CU_add_suite("io_channel", NULL, NULL);
2182 
2183 	CU_ADD_TEST(suite, thread_alloc);
2184 	CU_ADD_TEST(suite, thread_send_msg);
2185 	CU_ADD_TEST(suite, thread_poller);
2186 	CU_ADD_TEST(suite, poller_pause);
2187 	CU_ADD_TEST(suite, thread_for_each);
2188 	CU_ADD_TEST(suite, for_each_channel_remove);
2189 	CU_ADD_TEST(suite, for_each_channel_unreg);
2190 	CU_ADD_TEST(suite, thread_name);
2191 	CU_ADD_TEST(suite, channel);
2192 	CU_ADD_TEST(suite, channel_destroy_races);
2193 	CU_ADD_TEST(suite, thread_exit_test);
2194 	CU_ADD_TEST(suite, thread_update_stats_test);
2195 	CU_ADD_TEST(suite, nested_channel);
2196 	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);
2197 	CU_ADD_TEST(suite, cache_closest_timed_poller);
2198 	CU_ADD_TEST(suite, multi_timed_pollers_have_same_expiration);
2199 	CU_ADD_TEST(suite, io_device_lookup);
2200 	CU_ADD_TEST(suite, spdk_spin);
2201 	CU_ADD_TEST(suite, for_each_channel_and_thread_exit_race);
2202 	CU_ADD_TEST(suite, for_each_thread_and_thread_exit_race);
2203 	CU_ADD_TEST(suite, poller_get_name);
2204 	CU_ADD_TEST(suite, poller_get_id);
2205 	CU_ADD_TEST(suite, poller_get_state_str);
2206 	CU_ADD_TEST(suite, poller_get_period_ticks);
2207 	CU_ADD_TEST(suite, poller_get_stats);
2208 
2209 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
2210 	CU_cleanup_registry();
2211 	return num_failures;
2212 }
2213