/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#include "thread/thread_internal.h"

#include "thread/thread.c"
#include "common/lib/ut_multithread.c"

static int g_sched_rc = 0;

static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}

static bool
_thread_op_supported(enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return true;
	default:
		return false;
	}
}

static int
_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return _thread_schedule(thread);
	default:
		return -ENOTSUP;
	}
}
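
/*
 * Scheduling stubs used by thread_alloc(): _thread_schedule() simply reports
 * g_sched_rc, so a test can flip one global to simulate the scheduler
 * accepting (0) or rejecting (< 0) a new thread. _thread_op() and
 * _thread_op_supported() expose the same knob through the extended
 * spdk_thread_lib_init_ext() interface, which dispatches per operation
 * instead of taking a single schedule callback.
 */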

static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0,
				 SPDK_DEFAULT_MSG_MEMPOOL_SIZE);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}
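
/*
 * The exit/poll/destroy sequence repeated above is the canonical teardown for
 * a test thread. A minimal sketch of the same pattern as a reusable helper
 * (illustrative only, not used by this file):
 *
 *	static void
 *	ut_drain_and_destroy(struct spdk_thread *thread)
 *	{
 *		spdk_thread_exit(thread);
 *		// Keep polling so that exiting-state work (pending messages,
 *		// channel releases, poller unregistrations) is reaped.
 *		while (!spdk_thread_is_exited(thread)) {
 *			spdk_thread_poll(thread, 0, 0);
 *		}
 *		spdk_thread_destroy(thread);
 *	}
 */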

static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}
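
/*
 * Note on the clock in these tests: spdk_get_ticks is mocked and the test
 * harness's spdk_delay_us() advances the mocked tick count, so a "1000us wait
 * time" poller above fires exactly once per explicit 1000-tick advance
 * followed by a poll; wall-clock time never passes on its own.
 */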

struct poller_ctx {
	struct spdk_poller	*poller;
	bool			run;
};

static int
poller_run_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

/* Verify the same poller can be switched multiple times between
 * pause and resume while it runs.
 */
static int
poller_run_pause_resume_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;

	spdk_poller_pause(poller_ctx->poller);
	spdk_poller_resume(poller_ctx->poller);
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

static void
poller_msg_pause_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_pause(poller);
}

static void
poller_msg_resume_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_resume(poller);
}

static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Register a poller that switches between pause and resume itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that pauses itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);

		/* Register a timed poller that switches between pause and resume itself */
		poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause,
				    &poller_ctx, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		poller_ctx.run = false;
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		CU_ASSERT_EQUAL(poller_ctx.run, false);
		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}
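
/*
 * Summary of the pause/resume semantics exercised above: a pause takes effect
 * the next time the poller would run, repeated pauses are idempotent,
 * resuming an unpaused poller is a no-op, a pause immediately followed by a
 * resume leaves the poller running, and a paused poller can still be
 * unregistered.
 */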

static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After the message has run on each thread, the completion callback
	 * runs it one more time on the calling thread.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}
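
/*
 * spdk_for_each_thread() delivers the message to every thread in creation
 * order and then invokes the completion callback back on the thread that
 * started the iteration, which is why the counter reaches 4 only after
 * thread 0 is polled a second time.
 */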

static int
channel_create(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)++;
	return 0;
}

static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)--;
}

static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
	spdk_for_each_channel_continue(i, 0);
}

static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
}

static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in several different, sometimes non-intuitive, orders because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}

struct unreg_ctx {
	bool	ch_done;
	bool	foreach_done;
};

static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}

static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!RB_EMPTY(&g_io_devices));
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);
	ch0 = spdk_get_io_channel(&io_target);

	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);

	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));
	CU_ASSERT(RB_NEXT(io_device_tree, &g_io_devices, dev) == NULL);

	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);
	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have immediately removed the device.
	 */
	CU_ASSERT(dev == RB_MIN(io_device_tree, &g_io_devices));

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);

	/*
	 * There are no more foreach operations outstanding, so the device should be
	 * unregistered.
	 */
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}

static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}

static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}

static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch1) == &g_device1);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device2);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
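
/*
 * In short, channel() demonstrates the reference counting rules: an I/O
 * channel is per (thread, io_device) pair, so the second get on g_device1
 * returns the same channel without calling create_cb_1 again; destroy_cb runs
 * only when the last reference is put and the thread is polled; and getting a
 * channel for an unregistered device (&g_device3) returns NULL.
 */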

static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}

static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}

/**
 * This test checks that a sequence of get, put, get, put without allowing
 * the deferred put operation to complete doesn't result in releasing the memory
 * for the channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);

	allocate_threads(4);

	/* Test that all pending messages are reaped for the exiting thread,
	 * and that the thread then moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending a message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending a message to thread 0 will still be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test that releasing an I/O channel is reaped even after the thread
	 * moves to the exiting state.
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because the I/O channel
	 * release has not completed yet.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 can still get another reference to the I/O channel
	 * even after it moves to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel is
	 * released.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test that unregistering a poller is reaped for the exiting thread,
	 * and the thread moves to the exited state.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);

	poll_threads();

	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test that the exiting thread is forced to the exited state after a timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Advance the mocked clock to the exit timeout: the exit started at tick
	 * 10 and spdk_get_ticks_hz is mocked to 1, so the 5 second timeout
	 * expires at tick 15.
	 */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}

static int
poller_run_idle(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 0;
}

static int
poller_run_busy(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 1;
}
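
/*
 * These two pollers burn a caller-chosen amount of mocked time inside the
 * callback and differ only in their return value: 0 (idle) makes the thread
 * charge the elapsed ticks to idle_tsc, while a positive value (busy) charges
 * them to busy_tsc. The stats test below relies on exactly this distinction.
 */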

static void
thread_update_stats_test(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test that idle_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

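	/* Worked numbers for the assertions below: the clock starts at tsc_last
	 * == 10, the delay advances it by 100, and poller_run_idle advances it
	 * by another 1000 while running. Hence tsc_last == 10 + 100 + 1000 ==
	 * 1110, and only the 1000 ticks spent inside the idle poller are
	 * charged to idle_tsc.
	 */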
	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test that busy_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}

struct ut_nested_ch {
	struct spdk_io_channel *child;
	struct spdk_poller *poller;
};

struct ut_nested_dev {
	struct ut_nested_dev *child;
};

static int
ut_null_poll(void *ctx)
{
	return -1;
}

static int
ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct ut_nested_dev *_dev = io_device;
	struct ut_nested_dev *_child;

	_child = _dev->child;

	if (_child != NULL) {
		_ch->child = spdk_get_io_channel(_child);
		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
	} else {
		_ch->child = NULL;
	}

	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);

	return 0;
}

static void
ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct spdk_io_channel *child;

	child = _ch->child;
	if (child != NULL) {
		spdk_put_io_channel(child);
	}

	spdk_poller_unregister(&_ch->poller);
}

static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}

static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}

static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	dev1 = io_device_get(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = io_device_get(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = io_device_get(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single spdk_get_io_channel() call on dev1 also creates the channels
	 * to dev2 and dev3 in sequence. Their pollers are registered together.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single spdk_put_io_channel() call on ch1 also destroys the channels
	 * to dev2 and dev3 in sequence. Their pollers are unregistered together.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after unregistering the non-nested
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	*devcnt += 1;

	return 0;
}

static void
destroy_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt > 0);
	*devcnt -= 1;
}

static void
unregister_cb2(void *io_device)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt == 0);
}

static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads and each thread gets a channel from the same device. */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state, but it should remain exiting until
	 * both channels and the device are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state, but it should remain exiting until
	 * its channel is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	spdk_put_io_channel(ch2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}

static int
dummy_poller(void *arg)
{
	return SPDK_POLLER_IDLE;
}

static void
cache_closest_timed_poller(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *tmp;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	poller1 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1800);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poll_threads();

	/* When multiple timed pollers are inserted, the cache should
	 * have the closest timed poller.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller2);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller2);

	/* If we unregister a timed poller by spdk_poller_unregister()
	 * when it is waiting, it is marked as being unregistered and
	 * is actually unregistered when it is expired.
	 *
	 * Hence if we unregister the closest timed poller when it is waiting,
	 * the cache is not updated to the next timed poller until it is expired.
	 */
	tmp = poller2;

	spdk_poller_unregister(&poller2);
	CU_ASSERT(poller2 == NULL);

	spdk_delay_us(499);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == tmp);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	/* If we pause a timed poller by spdk_poller_pause() when it is waiting,
	 * it is marked as being paused and is actually paused when it is expired.
	 *
	 * Hence if we pause the closest timed poller when it is waiting, the cache
	 * is not updated to the next timed poller until it is expired.
	 */
	spdk_poller_pause(poller3);

	spdk_delay_us(299);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	/* After unregistering all timed pollers, the cache should
	 * be NULL.
	 */
	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller3);

	spdk_delay_us(200);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}

static void
multi_timed_pollers_have_same_expiration(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *poller4, *tmp;
	uint64_t start_ticks;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/*
	 * case 1: multiple timed pollers have the same next_run_tick.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poller4 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller4 != NULL);

	/* poller1 and poller2 have the same next_run_tick but the cache holds
	 * poller1 because poller1 was registered earlier than poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1 and poller2 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but the
	 * cache holds poller3 because poller3 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller3 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	/* poller1, poller2, and poller4 have the same next_run_tick but the
	 * cache holds poller4 because poller4 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller4);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller4 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but the
	 * cache holds poller3 because poller3 was updated earlier than poller1
	 * and poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 3000);

	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller2);
	spdk_poller_unregister(&poller3);
	spdk_poller_unregister(&poller4);

	spdk_delay_us(1500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 3000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	/*
	 * case 2: unregister timed pollers while multiple timed pollers are registered.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);

	/* after 250 usec, register poller2 and poller3. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 250);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 750);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* unregister poller2, which is not the closest. */
	tmp = poller2;
	spdk_poller_unregister(&poller2);

	/* after 250 usec, poller1 is expired. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller2 is not unregistered yet because it has not expired yet. */
	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(tmp->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 750);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_poller_unregister(&poller3);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);

	spdk_poller_unregister(&poller1);

	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}

static int
dummy_create_cb(void *io_device, void *ctx_buf)
{
	return 0;
}

static void
dummy_destroy_cb(void *io_device, void *ctx_buf)
{
}

/* We had a bug where the compare function for the io_device tree
 * did not work as expected because the subtraction overflowed
 * when the difference between two keys exceeded 32 bits.
 * This test case verifies the fix for the bug.
 */
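/* Illustrative sketch of that class of bug (hypothetical code, not the actual
 * historical comparator): truncating a 64-bit pointer difference to int loses
 * the upper 32 bits, so keys that differ only in those bits compare as equal:
 *
 *	static int
 *	buggy_cmp(struct io_device *d1, struct io_device *d2)
 *	{
 *		return (int)((uintptr_t)d1->io_device - (uintptr_t)d2->io_device);
 *	}
 *
 * The fix is to return -1/0/+1 from explicit < and > comparisons, which the
 * assertions below exercise across the 31-, 32-, and 63-bit boundaries.
 */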
static void
io_device_lookup(void)
{
	struct io_device dev1, dev2, *dev;
	struct spdk_io_channel *ch;

	/* The compare function io_device_cmp() had an overflow bug.
	 * Verify the fix first.
	 */
	dev1.io_device = (void *)0x7FFFFFFF;
	dev2.io_device = NULL;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	/* Check if overflow due to 32 bits does not occur. */
	dev1.io_device = (void *)0x80000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x100000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	dev1.io_device = (void *)0x8000000000000000;
	CU_ASSERT(io_device_cmp(&dev1, &dev2) > 0);
	CU_ASSERT(io_device_cmp(&dev2, &dev1) < 0);

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register((void *)0x1, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x7FFFFFFF, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x80000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000000000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)0x8000000100000000, dummy_create_cb, dummy_destroy_cb, 0, NULL);
	spdk_io_device_register((void *)UINT64_MAX, dummy_create_cb, dummy_destroy_cb, 0, NULL);

	/* RB_MIN and RB_NEXT should return devs in ascending order by addresses.
	 * RB_FOREACH uses RB_MIN and RB_NEXT internally.
	 */
	dev = RB_MIN(io_device_tree, &g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x1);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x7FFFFFFF);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x80000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000000000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)0x8000000100000000);

	dev = RB_NEXT(io_device_tree, &g_io_devices, dev);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(dev->io_device == (void *)UINT64_MAX);

	/* Verify spdk_get_io_channel() creates io_channels associated with the
	 * correct io_devices.
	 */
	ch = spdk_get_io_channel((void *)0x1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x1);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x7FFFFFFF);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x7FFFFFFF);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x80000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x80000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000000000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000000000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)0x8000000100000000);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)0x8000000100000000);
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel((void *)UINT64_MAX);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	CU_ASSERT(ch->dev->io_device == (void *)UINT64_MAX);
	spdk_put_io_channel(ch);

	poll_threads();

	spdk_io_device_unregister((void *)0x1, NULL);
	spdk_io_device_unregister((void *)0x7FFFFFFF, NULL);
	spdk_io_device_unregister((void *)0x80000000, NULL);
	spdk_io_device_unregister((void *)0x100000000, NULL);
	spdk_io_device_unregister((void *)0x8000000000000000, NULL);
	spdk_io_device_unregister((void *)0x8000000100000000, NULL);
	spdk_io_device_unregister((void *)UINT64_MAX, NULL);

	poll_threads();

	CU_ASSERT(RB_EMPTY(&g_io_devices));

	free_threads();
}

static enum spin_error g_spin_err;
static uint32_t g_spin_err_count = 0;

static void
ut_track_abort(enum spin_error err)
{
	g_spin_err = err;
	g_spin_err_count++;
}
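
/*
 * g_spin_abort_fn is the spinlock library's failure hook: pointing it at
 * ut_track_abort() turns what would otherwise be an abort() into a recorded
 * (error, count) pair that the assertions in spdk_spin() can inspect, and the
 * test restores the default __posix_abort handler when it finishes.
 */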

static void
spdk_spin(void)
{
	struct spdk_spinlock lock;

	g_spin_abort_fn = ut_track_abort;

	/* Do not need to be on an SPDK thread to initialize an spdk_spinlock */
	g_spin_err_count = 0;
	spdk_spin_init(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Trying to take a lock while not on an SPDK thread is an error */
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_NOT_SPDK_THREAD);

	/* Trying to check if a lock is held while not on an SPDK thread is an error */
	g_spin_err_count = 0;
	spdk_spin_held(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_NOT_SPDK_THREAD);

	/* Do not need to be on an SPDK thread to destroy an spdk_spinlock */
	g_spin_err_count = 0;
	spdk_spin_destroy(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	allocate_threads(2);
	set_thread(0);

	/* Can initialize an spdk_spinlock on an SPDK thread */
	g_spin_err_count = 0;
	spdk_spin_init(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Can take spinlock */
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Can release spinlock */
	g_spin_err_count = 0;
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Deadlock detected */
	g_spin_err_count = 0;
	g_spin_err = SPIN_ERR_NONE;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_DEADLOCK);

	/* Cannot unlock from wrong thread */
	set_thread(1);
	g_spin_err_count = 0;
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_WRONG_THREAD);

	/* Get back to a known good state */
	set_thread(0);
	g_spin_err_count = 0;
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Cannot release the same lock twice */
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_WRONG_THREAD);

	/* A lock that is not held is properly recognized */
	g_spin_err_count = 0;
	CU_ASSERT(!spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);

	/* A lock that is held is recognized as held by only the thread that holds it. */
	set_thread(1);
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	CU_ASSERT(spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);
	set_thread(0);
	CU_ASSERT(!spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);

	/* After releasing, no one thinks it is held */
	set_thread(1);
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	CU_ASSERT(!spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);
	set_thread(0);
	CU_ASSERT(!spdk_spin_held(&lock));
	CU_ASSERT(g_spin_err_count == 0);

	/* Destroying a lock that is held is an error. */
	set_thread(0);
	g_spin_err_count = 0;
	spdk_spin_lock(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	spdk_spin_destroy(&lock);
	CU_ASSERT(g_spin_err_count == 1);
	CU_ASSERT(g_spin_err == SPIN_ERR_LOCK_HELD);
	g_spin_err_count = 0;
	spdk_spin_unlock(&lock);
	CU_ASSERT(g_spin_err_count == 0);

	/* Clean up */
	g_spin_err_count = 0;
	spdk_spin_destroy(&lock);
	CU_ASSERT(g_spin_err_count == 0);
	free_threads();
	g_spin_abort_fn = __posix_abort;
}

static void
for_each_channel_and_thread_exit_race(void)
{
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread0;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 2);

	/*
	 * Test one race condition between spdk_thread_exit() and spdk_for_each_channel().
	 *
	 * thread 0 does not have an io_channel and calls spdk_thread_exit() immediately
	 * after spdk_for_each_channel(). In this case, thread 0 should exit only after
	 * spdk_for_each_channel() completes.
	 */

	set_thread(0);
	thread0 = spdk_get_thread();

	CU_ASSERT(thread0->for_each_count == 0);

	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	CU_ASSERT(thread0->for_each_count == 1);
	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_RUNNING);

	spdk_thread_exit(thread0);
	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_EXITING);

	poll_threads();
	CU_ASSERT(msg_count == 3);
	CU_ASSERT(thread0->for_each_count == 0);
	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_EXITED);

	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}

static void
for_each_thread_and_thread_exit_race(void)
{
	struct spdk_thread *thread0;
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);
	thread0 = spdk_get_thread();

	/* Even if thread 0 starts exiting, spdk_for_each_thread() should complete normally
	 * and then thread 0 should be moved to EXITED.
	 */

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);
	CU_ASSERT(thread0->for_each_count == 1);
	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_RUNNING);

	spdk_thread_exit(thread0);
	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_EXITING);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After the message has run on each thread, the completion callback
	 * runs it one more time on the calling thread.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	CU_ASSERT(thread0->for_each_count == 0);
	CU_ASSERT(thread0->state == SPDK_THREAD_STATE_EXITED);

	free_threads();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("io_channel", NULL, NULL);

	CU_ADD_TEST(suite, thread_alloc);
	CU_ADD_TEST(suite, thread_send_msg);
	CU_ADD_TEST(suite, thread_poller);
	CU_ADD_TEST(suite, poller_pause);
	CU_ADD_TEST(suite, thread_for_each);
	CU_ADD_TEST(suite, for_each_channel_remove);
	CU_ADD_TEST(suite, for_each_channel_unreg);
	CU_ADD_TEST(suite, thread_name);
	CU_ADD_TEST(suite, channel);
	CU_ADD_TEST(suite, channel_destroy_races);
	CU_ADD_TEST(suite, thread_exit_test);
	CU_ADD_TEST(suite, thread_update_stats_test);
	CU_ADD_TEST(suite, nested_channel);
	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);
	CU_ADD_TEST(suite, cache_closest_timed_poller);
	CU_ADD_TEST(suite, multi_timed_pollers_have_same_expiration);
	CU_ADD_TEST(suite, io_device_lookup);
	CU_ADD_TEST(suite, spdk_spin);
	CU_ADD_TEST(suite, for_each_channel_and_thread_exit_race);
	CU_ADD_TEST(suite, for_each_thread_and_thread_exit_race);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}