/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/thread.h"

#include "thread/thread.c"
#include "common/lib/ut_multithread.c"
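
/*
 * thread.c and the common multithread test harness are #include'd as sources
 * (rather than linked) so this white-box test can reach internal state such
 * as g_io_devices, g_threads, and struct io_device directly.
 */

/* Return code reported by the mocked scheduler callbacks below: 0 lets
 * spdk_thread_create() succeed, while a negative value makes it fail. */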
static int g_sched_rc = 0;

static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}

static bool
_thread_op_supported(enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return true;
	default:
		return false;
	}
}

static int
_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return _thread_schedule(thread);
	default:
		return -ENOTSUP;
	}
}

static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
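	/* Exiting is asynchronous; poll the thread until it reaches the exited
	 * state before destroying it. */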
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}

static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	MOCK_SET(spdk_get_ticks, 0);
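	/* spdk_get_ticks is mocked, so spdk_delay_us() advances a purely virtual
	 * clock and timed pollers fire deterministically when the thread is
	 * polled. */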
	/* Register a poller with no wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with a 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}

struct poller_ctx {
	struct spdk_poller	*poller;
	bool			run;
};

static int
poller_run_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

static void
poller_msg_pause_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_pause(poller);
}

static void
poller_msg_resume_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_resume(poller);
}

static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
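	/* Poller periods exercised by the pause/resume loop at the end of this
	 * test: an untimed poller (0) and a timed one (1000us). */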
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait times and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}

static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* No thread has been polled yet, so count should still be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After the callback has run on every thread, the completion callback
	 *  (also for_each_cb here) runs once more on the original thread.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}

static int
channel_create(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)++;
	return 0;
}

static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)--;
}

static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
	spdk_for_each_channel_continue(i, 0);
}

static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
}

static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
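	/* Two channel messages (threads 1 and 2) plus the completion callback. */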
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
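	/* The put of ch0 was deferred, so the iteration still visited all three
	 * channels: three channel messages plus the completion callback. */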
	CU_ASSERT(ch_count == 2);
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}

struct unreg_ctx {
	bool	ch_done;
	bool	foreach_done;
};

static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}

static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create a thread with no name; a name is generated automatically. */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create a thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}

static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}

static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}

static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch1) == &g_device1);

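	/* A second get on the same device returns the same channel and bumps its
	 * reference count; the create callback is not invoked again. */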
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device2);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}

static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}

/**
 * Verify that a get, put, get, put sequence, without letting the deferred put
 * operation complete in between, does not release the memory for the channel
 * twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);
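	/* With the tick rate mocked to 1 Hz, one tick equals one second, so the
	 * thread-exit timeout below can be triggered by advancing spdk_get_ticks
	 * by just a few ticks. */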

	allocate_threads(4);

	/* Test that all pending messages are reaped for the exiting thread, and
	 * that the thread then moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending a message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending a message to thread 0 will still be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test that a released I/O channel is reaped even after the thread moves
	 * to the exiting state.
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because the I/O channel
	 * release has not completed yet.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 is still able to get another reference to the I/O channel
	 * even after the thread moves to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel is
	 * released.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test that an unregistering poller is reaped for the exiting thread,
	 * and that the thread then moves to the exited state.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);
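	/* A poller may still be registered while the thread is exiting, but the
	 * thread cannot reach the exited state until every poller is
	 * unregistered. */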

	poll_threads();

	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test that the exiting thread is exited forcefully after a timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Advance the mocked clock past the exit timeout to force the thread into
	 * the exited state. */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}

static int
poller_run_idle(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 0;
}

static int
poller_run_busy(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 1;
}

static void
thread_update_stats_test(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test that idle_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(100);

	poll_thread_times(0, 1);
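	/* The poller consumed 1000us and returned 0 (idle), so that time is
	 * charged to idle_tsc; tsc_last advances by the 100us gap plus the
	 * 1000us spent polling. */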

	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test that busy_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);
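	/* The poller consumed 100000us and returned a positive value (busy), so
	 * that time is charged to busy_tsc instead. */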

	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}

struct ut_nested_ch {
	struct spdk_io_channel *child;
	struct spdk_poller *poller;
};

struct ut_nested_dev {
	struct ut_nested_dev *child;
};

static struct io_device *
ut_get_io_device(void *dev)
{
	struct io_device *tmp;

	TAILQ_FOREACH(tmp, &g_io_devices, tailq) {
		if (tmp->io_device == dev) {
			return tmp;
		}
	}

	return NULL;
}

static int
ut_null_poll(void *ctx)
{
	return -1;
}

static int
ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct ut_nested_dev *_dev = io_device;
	struct ut_nested_dev *_child;

	_child = _dev->child;

	if (_child != NULL) {
		_ch->child = spdk_get_io_channel(_child);
		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
	} else {
		_ch->child = NULL;
	}

	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);

	return 0;
}

static void
ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct spdk_io_channel *child;

	child = _ch->child;
	if (child != NULL) {
		spdk_put_io_channel(child);
	}

	spdk_poller_unregister(&_ch->poller);
}

static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}

static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}

static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	dev1 = ut_get_io_device(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = ut_get_io_device(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = ut_get_io_device(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single spdk_get_io_channel() call on dev1 also creates the channels
	 * for dev2 and dev3 recursively. Their pollers are registered along the way.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);
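	/* poll_thread_times() runs a bounded number of poll iterations, which
	 * lets the nested teardown below be observed one level at a time. */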

	/* A single spdk_put_io_channel() call on ch1 also releases the channels
	 * for dev2 and dev3 recursively. Their pollers are unregistered along the way.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after releasing the outermost
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	*devcnt += 1;

	return 0;
}

static void
destroy_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt > 0);
	*devcnt -= 1;
}

static void
unregister_cb2(void *io_device)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt == 0);
}

static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads, each of which gets a channel from the same device. */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state; it should remain in that state
	 * until both channels and the device are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state; it should remain in that state
	 * until its channel is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	spdk_put_io_channel(ch2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("io_channel", NULL, NULL);

	CU_ADD_TEST(suite, thread_alloc);
	CU_ADD_TEST(suite, thread_send_msg);
	CU_ADD_TEST(suite, thread_poller);
	CU_ADD_TEST(suite, poller_pause);
	CU_ADD_TEST(suite, thread_for_each);
	CU_ADD_TEST(suite, for_each_channel_remove);
	CU_ADD_TEST(suite, for_each_channel_unreg);
	CU_ADD_TEST(suite, thread_name);
	CU_ADD_TEST(suite, channel);
	CU_ADD_TEST(suite, channel_destroy_races);
	CU_ADD_TEST(suite, thread_exit_test);
	CU_ADD_TEST(suite, thread_update_stats_test);
	CU_ADD_TEST(suite, nested_channel);
	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}