/* xref: /spdk/test/unit/lib/thread/thread.c/thread_ut.c (revision 19d5c3ed8e87dbd240c77ae0ddb5eda25ae99b5f) */
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "spdk_internal/thread.h"
39 
40 #include "thread/thread.c"
41 #include "common/lib/ut_multithread.c"
42 
/* Result returned by _thread_schedule(); tests stage 0 or -1 here to
 * simulate scheduler success or failure. */
static int g_sched_rc = 0;

/* Stub scheduler callback passed to spdk_thread_lib_init(); ignores the
 * thread and reports whatever result the test staged in g_sched_rc. */
static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}
50 
/* Verify thread creation/destruction both without a scheduler callback and
 * with one that can be made to succeed or fail via g_sched_rc. */
static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	/* Scheduling fails, so thread creation must fail too */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}
83 
/* Message callback: flags completion by setting the bool pointed to by ctx. */
static void
send_msg_cb(void *ctx)
{
	*(bool *)ctx = true;
}
91 
/* Verify cross-thread messaging: a message sent to thread 0 executes only
 * when thread 0 itself is polled, not when the sending thread is polled. */
static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}
125 
/* Poller callback: records that it ran via the bool ctx points to.
 * Returns -1, which the thread library treats as "did no work". */
static int
poller_run_done(void *ctx)
{
	bool *ran = ctx;

	*ran = true;
	return -1;
}
135 
/* Verify poller execution: an untimed (0us) poller fires on every poll,
 * while a timed (1000us) poller fires only after its period elapses. */
static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	/* Pin the tick counter to 0 so timed-poller expiry is deterministic. */
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	/* Period has not elapsed yet, so the poller must not fire. */
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	/* It just fired; without another delay it must not fire again. */
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}
181 
/* Context shared with poller callbacks: the poller handle (so a callback can
 * pause its own poller) and a flag recording whether the callback ran. */
struct poller_ctx {
	struct spdk_poller	*poller;	/* poller to operate on from the callback */
	bool			run;		/* set true each time the callback executes */
};
186 
/* Poller callback that pauses its own poller: records the run, then pauses
 * the poller stored in its poller_ctx so it will not fire again until
 * resumed. */
static int
poller_run_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}
197 
/* Message callback: pauses the poller passed as the message context.  Used
 * to verify pause works when requested from spdk_thread_send_msg(). */
static void
poller_msg_pause_cb(void *ctx)
{
	spdk_poller_pause(ctx);
}
205 
/* Message callback: resumes the poller passed as the message context.  Used
 * to verify resume works when requested from spdk_thread_send_msg(). */
static void
poller_msg_resume_cb(void *ctx)
{
	spdk_poller_resume(ctx);
}
213 
/* Exercise spdk_poller_pause()/spdk_poller_resume() in many orders:
 * self-pause from inside the callback, resume of an unpaused poller,
 * redundant pause, pause followed immediately by resume, unregistering a
 * paused poller, and pausing/resuming timed and untimed pollers via
 * thread messages. */
static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* It paused itself on the first run, so it must not run again. */
	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	/* The paused poller must stay paused until fully unregistered. */
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		/* First poll delivers the resume message; a timed poller then
		 * needs its period to elapse before it fires again. */
		poll_threads();
		if (delay[i] > 0) {
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}
344 
/* Callback for spdk_for_each_thread(): bumps the counter ctx points to. */
static void
for_each_cb(void *ctx)
{
	int *counter = ctx;

	*counter += 1;
}
352 
/* Verify spdk_for_each_thread() visits every thread exactly once and then
 * invokes the completion callback back on the originating thread. */
static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	/* Same callback for each thread and for completion, so the final
	 * count is number-of-threads + 1. */
	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After each thread is called, the completion calls it
	 * one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}
382 
/* Channel create callback: the io_device pointer doubles as a live-channel
 * counter, incremented on every channel creation. */
static int
channel_create(void *io_device, void *ctx_buf)
{
	int *count = io_device;

	*count += 1;
	return 0;
}
391 
/* Channel destroy callback: decrements the live-channel counter kept in
 * the io_device pointer. */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *count = io_device;

	*count -= 1;
}
399 
/* Per-channel callback for spdk_for_each_channel(): counts the visit and
 * immediately advances the iteration with status 0. */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *visits = spdk_io_channel_iter_get_ctx(i);

	*visits += 1;
	spdk_for_each_channel_continue(i, 0);
}
408 
/* Completion callback for spdk_for_each_channel(): counts one more visit;
 * the iteration status is not checked by these tests. */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *visits = spdk_io_channel_iter_get_ctx(i);

	*visits += 1;
}
416 
/* Verify spdk_for_each_channel() tolerates a channel being released around
 * the iteration, both before the iteration starts and after it starts but
 * before the owning thread is polled. */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	/* The put is deferred; the channel is still alive until polled. */
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	/* 2 remaining channel messages + 1 completion callback */
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	/* 3 channel messages + 1 completion callback */
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}
482 
/* Tracks progress of a spdk_for_each_channel() iteration that races with
 * spdk_io_device_unregister(). */
struct unreg_ctx {
	bool	ch_done;	/* per-channel callback executed */
	bool	foreach_done;	/* completion callback executed */
};
487 
/* Per-channel callback for the unregister race test: records the visit,
 * checks the iterator is still attached to a thread mid-iteration, and
 * advances to the next channel. */
static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}
498 
/* Completion callback for the unregister race test: records that the
 * iteration finished; status is not checked. */
static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}
506 
/* Verify that spdk_io_device_unregister() is deferred while a
 * spdk_for_each_channel() iteration is outstanding, and that re-registering
 * the same io_device address during that window is a no-op. */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}
557 
/* Verify thread naming: an unnamed thread gets an auto-generated name, and
 * an explicitly named thread reports that exact name. */
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}
589 
/* The addresses of these variables serve as unique io_device handles;
 * g_device3 is never registered, so lookups against it must fail. */
static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

/* Expected per-channel context values written by the create callbacks. */
static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

/* Counters recording how many times the create/destroy callbacks fired. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;
599 
/* Channel create callback for g_device1: stores the expected context value
 * g_ctx1 and counts the invocation. */
static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}
608 
/* Channel destroy callback for g_device1: checks the context value written
 * by create_cb_1 survived and counts the invocation. */
static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}
616 
/* Channel create callback for g_device2: stores the expected context value
 * g_ctx2 and counts the invocation. */
static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}
625 
/* Channel destroy callback for g_device2: checks the context value written
 * by create_cb_2 survived and counts the invocation. */
static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}
633 
/* Verify I/O channel reference counting: repeated gets on the same device
 * share one channel, destroy fires only on the last put, and getting a
 * channel for an unregistered device fails. */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	/* A second get on the same device reuses the existing channel. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* One reference remains, so destroy must not fire yet. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	/* Last reference dropped: destroy fires once polled. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* g_device3 was never registered, so this must fail. */
	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
692 
/* Channel create callback: the channel context doubles as a refcount that
 * must start at zero (i.e. freshly allocated, not recycled) and is set to
 * one while the channel is alive. */
static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}
703 
/* Channel destroy callback: checks the channel is destroyed exactly once
 * (refcount still one) and drops the refcount back to zero. */
static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}
712 
/**
 * This test is checking that a sequence of get, put, get, put without allowing
 * the deferred put operation to complete doesn't result in releasing the memory
 * for the channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	/* The put is deferred; get the channel again before it completes. */
	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	/* Now let both deferred puts run; destroy_cb asserts it runs once. */
	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
747 
/* Verify spdk_thread_exit() semantics: pending messages are still reaped,
 * new messages and channel gets are rejected, exit fails with -EBUSY while
 * pollers or channels remain, and double-exit returns -EINVAL. */
static void
thread_exit(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller;
	void *ctx;
	bool done1 = false, done2 = false, poller_run = false;
	int rc __attribute__((unused));

	allocate_threads(6);

	/* Test all pending messages are reaped for the thread marked as exited. */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending message to thread 0 will be accepted. */
	set_thread(1);
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Mark thread 0 as exited. */
	set_thread(0);
	spdk_thread_exit(thread);

	/* Sending message to thread 0 will be rejected. */
	set_thread(1);
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == -EIO);

	/* Thread 0 will reap pending message. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == false);

	/* Test releasing I/O channel is reaped even after the thread is marked
	 * as exited.
	 */
	set_thread(2);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* Thread will not be able to get I/O channel after it is marked as exited. */
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(ch == NULL);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test call spdk_thread_exit() is only once for a single thread. */
	set_thread(3);

	thread = spdk_get_thread();

	CU_ASSERT(spdk_thread_exit(thread) == 0);
	CU_ASSERT(spdk_thread_exit(thread) == -EINVAL);

	/* Test if spdk_thread_exit() fails when there is any registered poller,
	 * and if no poller is executed after the thread is marked as exited.
	 */
	set_thread(4);
	thread = spdk_get_thread();

	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	CU_ASSERT(spdk_thread_exit(thread) == -EBUSY);

	/* Even a paused poller still counts as busy. */
	spdk_poller_pause(poller);

	CU_ASSERT(spdk_thread_exit(thread) == -EBUSY);

	poll_threads();

	CU_ASSERT(spdk_thread_exit(thread) == -EBUSY);

	spdk_poller_unregister(&poller);

	CU_ASSERT(spdk_thread_exit(thread) == 0);

	poll_threads();

	CU_ASSERT(poller_run == false);

	/* Test if spdk_thread_exit() fails when there is any active I/O channel. */
	set_thread(5);
	thread = spdk_get_thread();

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch != NULL);

	CU_ASSERT(spdk_thread_exit(thread) == -EBUSY);

	/* The put is deferred, but exit may succeed as soon as it is issued. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);
	CU_ASSERT(g_destroy_cb_calls == 0);

	CU_ASSERT(spdk_thread_exit(thread) == 0);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	spdk_io_device_unregister(&g_device1, NULL);

	CU_ASSERT(TAILQ_EMPTY(&thread->io_channels));

	free_threads();
}
878 
879 int
880 main(int argc, char **argv)
881 {
882 	CU_pSuite	suite = NULL;
883 	unsigned int	num_failures;
884 
885 	if (CU_initialize_registry() != CUE_SUCCESS) {
886 		return CU_get_error();
887 	}
888 
889 	suite = CU_add_suite("io_channel", NULL, NULL);
890 	if (suite == NULL) {
891 		CU_cleanup_registry();
892 		return CU_get_error();
893 	}
894 
895 	if (
896 		CU_add_test(suite, "thread_alloc", thread_alloc) == NULL ||
897 		CU_add_test(suite, "thread_send_msg", thread_send_msg) == NULL ||
898 		CU_add_test(suite, "thread_poller", thread_poller) == NULL ||
899 		CU_add_test(suite, "poller_pause", poller_pause) == NULL ||
900 		CU_add_test(suite, "thread_for_each", thread_for_each) == NULL ||
901 		CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove) == NULL ||
902 		CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg) == NULL ||
903 		CU_add_test(suite, "thread_name", thread_name) == NULL ||
904 		CU_add_test(suite, "channel", channel) == NULL ||
905 		CU_add_test(suite, "channel_destroy_races", channel_destroy_races) == NULL ||
906 		CU_add_test(suite, "thread_exit", thread_exit) == NULL
907 	) {
908 		CU_cleanup_registry();
909 		return CU_get_error();
910 	}
911 
912 	CU_basic_set_mode(CU_BRM_VERBOSE);
913 	CU_basic_run_tests();
914 	num_failures = CU_get_number_of_failures();
915 	CU_cleanup_registry();
916 	return num_failures;
917 }
918