/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/thread.h"

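/*
 * The implementation files are included directly (rather than linked) so that
 * the tests can exercise and inspect internal state such as g_io_devices and
 * struct io_device.
 */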
#include "thread/thread.c"
#include "common/lib/ut_multithread.c"

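/*
 * Return code handed back by the stub scheduler below.  Tests set this to 0
 * to let spdk_thread_create() succeed, or to a negative value to make
 * scheduling (and therefore thread creation) fail.
 */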
static int g_sched_rc = 0;

static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}

static bool
_thread_op_supported(enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return true;
	default:
		return false;
	}
}

static int
_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return _thread_schedule(thread);
	default:
		return -ENOTSUP;
	}
}

static void
thread_alloc(void)
{
	struct spdk_thread *thread;

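	/*
	 * Teardown pattern used throughout this file: mark the thread as
	 * exiting, poll it until it reaches the exited state, then destroy it.
	 */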
	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}

static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}

static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

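	/*
	 * In this harness, spdk_delay_us() advances the mocked spdk_get_ticks()
	 * value, so a timed poller only fires once the simulated time has
	 * advanced past its period.
	 */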
	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}

struct poller_ctx {
	struct spdk_poller	*poller;
	bool			run;
};

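/*
 * Poller callback that pauses itself on its first run.  Used to verify that
 * a paused poller is skipped by subsequent polls until it is resumed.
 */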
static int
poller_run_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

static void
poller_msg_pause_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_pause(poller);
}

static void
poller_msg_resume_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_resume(poller);
}

static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}

static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After the message has run on every thread, the completion callback
	 *  runs on the original thread and increments count one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}

static int
channel_create(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)++;
	return 0;
}

static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)--;
}

static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
	spdk_for_each_channel_continue(i, 0);
}

static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
}

static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in several different, sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}

struct unreg_ctx {
	bool	ch_done;
	bool	foreach_done;
};

static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}

static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create a thread with no name; a name is generated automatically. */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	/* Create a thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}

static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}

static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}

static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

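	/*
	 * A second get on the same device from the same thread returns the
	 * cached channel; the create callback is not invoked again.
	 */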
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}

static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}

/**
 * Verify that a get, put, get, put sequence, without allowing the deferred
 * put operation to complete, does not release the memory for the channel
 * twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);

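	/*
	 * The actual release is deferred via a message, so without polling the
	 * thread the channel still exists and the get below picks it back up.
	 */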
	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static void
thread_exit_test(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller1, *poller2;
	void *ctx;
	bool done1 = false, done2 = false, poller1_run = false, poller2_run = false;
	int rc __attribute__((unused));

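	/*
	 * With spdk_get_ticks_hz mocked to 1, each mocked tick counts as one
	 * second, which makes the thread-exit timeout easy to trigger below.
	 */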
	MOCK_SET(spdk_get_ticks, 10);
	MOCK_SET(spdk_get_ticks_hz, 1);

	allocate_threads(4);

	/* Test that all pending messages are reaped for the exiting thread, and that
	 * the thread then moves to the exited state.
	 */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending a message to thread 0 will be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Move thread 0 to the exiting state. */
	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Sending a message to thread 0 will still be accepted. */
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == 0);

	/* Thread 0 will reap pending messages. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == true);

	/* Thread 0 will move to the exited state. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test that releasing an I/O channel is reaped even after the thread moves to
	 * the exiting state.
	 */
	set_thread(1);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread 1 will not move to the exited state yet because the I/O channel
	 * release has not completed yet.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Thread 1 can still get another reference to the I/O channel even after
	 * it has moved to the exiting state.
	 */
	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);

	CU_ASSERT(g_create_cb_calls == 0);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	spdk_put_io_channel(ch);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Thread 1 will move to the exited state after the I/O channel is released. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test that an unregistering poller is reaped for the exiting thread, and that
	 * the thread then moves to the exited state.
	 */
	set_thread(2);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_poller_unregister(&poller1);

	spdk_thread_exit(thread);

	poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0);

	poll_threads();

	CU_ASSERT(poller1_run == false);
	CU_ASSERT(poller2_run == true);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	spdk_poller_unregister(&poller2);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	/* Test that the exiting thread is exited forcefully after a timeout. */
	set_thread(3);
	thread = spdk_get_thread();

	poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0);
	CU_ASSERT(poller1 != NULL);

	spdk_thread_exit(thread);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	MOCK_SET(spdk_get_ticks, 11);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	/* Advance mocked time past the exit timeout so the thread is forced to exit. */
	MOCK_SET(spdk_get_ticks, 15);

	poll_threads();

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_poller_unregister(&poller1);

	poll_threads();

	MOCK_CLEAR(spdk_get_ticks);
	MOCK_CLEAR(spdk_get_ticks_hz);

	free_threads();
}

static int
poller_run_idle(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 0;
}

static int
poller_run_busy(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 1;
}

static void
thread_update_stats_test(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test that idle_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

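	/*
	 * Time spent inside a poller callback is attributed to idle_tsc when
	 * the poller returns 0 and to busy_tsc when it returns a positive
	 * value.  Here: tsc_last = 10 + 100 (delay) + 1000 (idle poller) = 1110.
	 */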
	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test that busy_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	MOCK_CLEAR(spdk_get_ticks);

	free_threads();
}

struct ut_nested_ch {
	struct spdk_io_channel *child;
	struct spdk_poller *poller;
};

struct ut_nested_dev {
	struct ut_nested_dev *child;
};

static struct io_device *
ut_get_io_device(void *dev)
{
	struct io_device *tmp;

	TAILQ_FOREACH(tmp, &g_io_devices, tailq) {
		if (tmp->io_device == dev) {
			return tmp;
		}
	}

	return NULL;
}

static int
ut_null_poll(void *ctx)
{
	return -1;
}

static int
ut_nested_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct ut_nested_dev *_dev = io_device;
	struct ut_nested_dev *_child;

	_child = _dev->child;

	if (_child != NULL) {
		_ch->child = spdk_get_io_channel(_child);
		SPDK_CU_ASSERT_FATAL(_ch->child != NULL);
	} else {
		_ch->child = NULL;
	}

	_ch->poller = spdk_poller_register(ut_null_poll, NULL, 0);
	SPDK_CU_ASSERT_FATAL(_ch->poller != NULL);

	return 0;
}

static void
ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct ut_nested_ch *_ch = ctx_buf;
	struct spdk_io_channel *child;

	child = _ch->child;
	if (child != NULL) {
		spdk_put_io_channel(child);
	}

	spdk_poller_unregister(&_ch->poller);
}

static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}

static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}

static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}

static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	dev1 = ut_get_io_device(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = ut_get_io_device(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = ut_get_io_device(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single spdk_get_io_channel() call on dev1 will also create channels
	 * to dev2 and dev3 recursively. Pollers will be registered together.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single spdk_put_io_channel() call on ch1 will also destroy the channels
	 * to dev2 and dev3 recursively. Pollers will be unregistered together.
	 */
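	/*
	 * Each destroy is deferred via a message, so each poll_thread_times(0, 1)
	 * below unwinds exactly one level of the nesting.
	 */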
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after releasing the outermost
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static int
create_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	*devcnt += 1;

	return 0;
}

static void
destroy_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt > 0);
	*devcnt -= 1;
}

static void
unregister_cb2(void *io_device)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt == 0);
}

static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads and let each thread get a channel from the same device. */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state; it should remain in that state until
	 * both channels and the device are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

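	/* The device unregister cannot complete while thread 1 still holds ch2,
	 * so thread 0 cannot finish exiting yet.
	 */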
	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state; it should remain in that state until
	 * its channel is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	spdk_put_io_channel(ch2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("io_channel", NULL, NULL);

	CU_ADD_TEST(suite, thread_alloc);
	CU_ADD_TEST(suite, thread_send_msg);
	CU_ADD_TEST(suite, thread_poller);
	CU_ADD_TEST(suite, poller_pause);
	CU_ADD_TEST(suite, thread_for_each);
	CU_ADD_TEST(suite, for_each_channel_remove);
	CU_ADD_TEST(suite, for_each_channel_unreg);
	CU_ADD_TEST(suite, thread_name);
	CU_ADD_TEST(suite, channel);
	CU_ADD_TEST(suite, channel_destroy_races);
	CU_ADD_TEST(suite, thread_exit_test);
	CU_ADD_TEST(suite, thread_update_stats_test);
	CU_ADD_TEST(suite, nested_channel);
	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}