/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/thread.h"

#include "thread/thread.c"
#include "common/lib/ut_multithread.c"

static int g_sched_rc = 0;

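/* Mock scheduler callback: returns g_sched_rc so the tests can force
 * spdk_thread_create() to succeed or fail on demand.
 */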
static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}

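/* Callbacks for spdk_thread_lib_init_ext(): only SPDK_THREAD_OP_NEW is
 * reported as supported, and handling it defers to the same mock scheduler.
 */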
static bool
_thread_op_supported(enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return true;
	default:
		return false;
	}
}

static int
_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
{
	switch (op) {
	case SPDK_THREAD_OP_NEW:
		return _thread_schedule(thread);
	default:
		return -ENOTSUP;
	}
}

static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();

	/* Scheduling callback exists with extended thread library initialization. */
	spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}

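/* Message callback: sets the bool pointed to by ctx so tests can observe
 * when a sent message has actually executed.
 */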
static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}

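/* Poller callback used throughout these tests: records that it ran via the
 * bool pointed to by ctx.
 */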
static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no wait time and test that it executes */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}

struct poller_ctx {
	struct spdk_poller	*poller;
	bool			run;
};

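/* Poller that pauses itself the first time it runs, via the poller handle
 * stored in its context.
 */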
static int
poller_run_pause(void *ctx)
{
	struct poller_ctx *poller_ctx = ctx;

	poller_ctx->run = true;
	spdk_poller_pause(poller_ctx->poller);

	return 0;
}

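/* Message callbacks that pause or resume the poller passed as ctx, used to
 * exercise pausing/resuming a poller from within a thread message.
 */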
static void
poller_msg_pause_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_pause(poller);
}

static void
poller_msg_resume_cb(void *ctx)
{
	struct spdk_poller *poller = ctx;

	spdk_poller_resume(poller);
}

static void
poller_pause(void)
{
	struct poller_ctx poller_ctx = {};
	unsigned int delay[] = { 0, 1000 };
	unsigned int i;

	allocate_threads(1);
	set_thread(0);

	/* Register a poller that pauses itself */
	poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Verify that resuming an unpaused poller doesn't do anything */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	spdk_poller_resume(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that pausing the same poller twice works too */
	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_pause(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	/* Verify that a poller is run when it's resumed immediately after pausing */
	poller_ctx.run = false;
	spdk_poller_pause(poller_ctx.poller);
	spdk_poller_resume(poller_ctx.poller);
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_unregister(&poller_ctx.poller);
	CU_ASSERT_PTR_NULL(poller_ctx.poller);

	/* Poll the thread to make sure the previous poller gets unregistered */
	poll_threads();
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Verify that it's possible to unregister a paused poller */
	poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0);
	CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, true);

	spdk_poller_pause(poller_ctx.poller);

	poller_ctx.run = false;
	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);

	spdk_poller_unregister(&poller_ctx.poller);

	poll_threads();
	CU_ASSERT_EQUAL(poller_ctx.run, false);
	CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false);

	/* Register pollers with 0 and 1000us wait time and pause/resume them */
	for (i = 0; i < SPDK_COUNTOF(delay); ++i) {
		poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]);
		CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_pause(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_poller_resume(poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		/* Verify that the poller can be paused/resumed from spdk_thread_send_msg */
		spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller);

		spdk_delay_us(delay[i]);
		poller_ctx.run = false;
		poll_threads();
		CU_ASSERT_EQUAL(poller_ctx.run, false);

		spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller);

		poll_threads();
		if (delay[i] > 0) {
			spdk_delay_us(delay[i]);
			poll_threads();
		}
		CU_ASSERT_EQUAL(poller_ctx.run, true);

		spdk_poller_unregister(&poller_ctx.poller);
		CU_ASSERT_PTR_NULL(poller_ctx.poller);
	}

	free_threads();
}

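/* spdk_for_each_thread() callback: increments a shared counter once per
 * thread, and once more when used as the completion callback.
 */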
static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After the callback has run on each thread, the completion callback
	 *  runs one more time on the original thread.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}

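/* I/O channel create/destroy callbacks that track the number of active
 * channels in the int pointed to by io_device.
 */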
static int
channel_create(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)++;
	return 0;
}

static void
channel_destroy(void *io_device, void *ctx_buf)
{
	int *ch_count = io_device;

	(*ch_count)--;
}

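/* spdk_for_each_channel() callbacks: both the per-channel message and the
 * completion bump the same counter so tests can count iteration steps.
 */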
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
	spdk_for_each_channel_continue(i, 0);
}

static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
	int *msg_count = spdk_io_channel_iter_get_ctx(i);

	(*msg_count)++;
}

static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int ch_count = 0;
	int msg_count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&ch_count);
	set_thread(1);
	ch1 = spdk_get_io_channel(&ch_count);
	set_thread(2);
	ch2 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels and, during the iteration, one of the channels is deleted.
	 * This is done in several different, sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);
	poll_threads();
	CU_ASSERT(ch_count == 2);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	CU_ASSERT(msg_count == 0);
	poll_threads();
	CU_ASSERT(msg_count == 3);

	msg_count = 0;

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&ch_count);
	CU_ASSERT(ch_count == 3);
	spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl);
	spdk_put_io_channel(ch0);
	CU_ASSERT(ch_count == 3);

	poll_threads();
	CU_ASSERT(ch_count == 2);
	CU_ASSERT(msg_count == 4);
	set_thread(1);
	spdk_put_io_channel(ch1);
	CU_ASSERT(ch_count == 2);
	set_thread(2);
	spdk_put_io_channel(ch2);
	CU_ASSERT(ch_count == 2);
	poll_threads();
	CU_ASSERT(ch_count == 0);

	spdk_io_device_unregister(&ch_count, NULL);
	poll_threads();

	free_threads();
}

struct unreg_ctx {
	bool	ch_done;
	bool	foreach_done;
};

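/* Callbacks for the unregister-during-foreach test: they record that the
 * per-channel step and the completion each ran.
 */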
static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}

static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target = 0;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create a thread with no name; a name is generated automatically */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	/* Create a thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}

static uint64_t g_device1;
static uint64_t g_device2;
static uint64_t g_device3;

static uint64_t g_ctx1 = 0x1111;
static uint64_t g_ctx2 = 0x2222;

static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

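/* Per-device create/destroy callbacks: they stamp and verify the channel
 * context with the matching g_ctx value and count how many times they run.
 */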
static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	*(uint64_t *)ctx_buf = g_ctx1;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1);
	g_destroy_cb_calls++;
}

static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	*(uint64_t *)ctx_buf = g_ctx2;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &g_device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2);
	g_destroy_cb_calls++;
}

static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);
	spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&g_device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	ch1 = spdk_get_io_channel(&g_device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&g_device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

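/* For channel_destroy_races: the channel context is used as a 0/1 flag that
 * create sets and destroy clears, asserting each happens exactly once.
 */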
static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}

static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}

/**
 * Verify that a get, put, get, put sequence, without letting the deferred put
 * operation complete in between, does not release the channel's memory twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static void
thread_exit(void)
{
	struct spdk_thread *thread;
	struct spdk_io_channel *ch;
	struct spdk_poller *poller;
	void *ctx;
	bool done1 = false, done2 = false, poller_run = false;
	int rc __attribute__((unused));

	allocate_threads(6);

	/* Test that all pending messages are reaped for a thread marked as exited. */
	set_thread(0);
	thread = spdk_get_thread();

	/* Sending a message to thread 0 will be accepted. */
	set_thread(1);
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!done1);

	/* Mark thread 0 as exited. */
	set_thread(0);
	spdk_thread_exit(thread);

	/* Sending a message to thread 0 will now be rejected. */
	set_thread(1);
	rc = spdk_thread_send_msg(thread, send_msg_cb, &done2);
	CU_ASSERT(rc == -EIO);

	/* Thread 0 will reap the pending message. */
	poll_thread(0);
	CU_ASSERT(done1 == true);
	CU_ASSERT(done2 == false);

	/* Test that a released I/O channel is still reaped after the thread is
	 * marked as exited.
	 */
	set_thread(2);

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctx = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(*(uint64_t *)ctx == g_ctx1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);

	/* The thread cannot get an I/O channel after it is marked as exited. */
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(ch == NULL);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	spdk_io_device_unregister(&g_device1, NULL);
	poll_threads();

	/* Test that spdk_thread_exit() can be called only once for a given thread. */
	set_thread(3);

	thread = spdk_get_thread();

	CU_ASSERT(spdk_thread_exit(thread) == 0);
	CU_ASSERT(spdk_thread_exit(thread) == -EINVAL);

	/* Test that spdk_thread_exit() fails while any poller is still registered,
	 * and that no poller runs after the thread is marked as exited.
	 */
	set_thread(4);
	thread = spdk_get_thread();

	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	CU_ASSERT(spdk_thread_exit(thread) == -EBUSY);

	spdk_poller_pause(poller);

	CU_ASSERT(spdk_thread_exit(thread) == -EBUSY);

	poll_threads();

	CU_ASSERT(spdk_thread_exit(thread) == -EBUSY);

	spdk_poller_unregister(&poller);

	CU_ASSERT(spdk_thread_exit(thread) == 0);

	poll_threads();

	CU_ASSERT(poller_run == false);

	/* Test that spdk_thread_exit() fails while any I/O channel is still active. */
	set_thread(5);
	thread = spdk_get_thread();

	spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL);

	g_create_cb_calls = 0;
	ch = spdk_get_io_channel(&g_device1);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch != NULL);

	CU_ASSERT(spdk_thread_exit(thread) == -EBUSY);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch);
	CU_ASSERT(g_destroy_cb_calls == 0);

	CU_ASSERT(spdk_thread_exit(thread) == 0);

	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	spdk_io_device_unregister(&g_device1, NULL);

	CU_ASSERT(TAILQ_EMPTY(&thread->io_channels));

	free_threads();
}

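/* Pollers that consume delay_us worth of ticks and then report themselves as
 * idle (return 0) or busy (return 1), used to drive the thread stats test.
 */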
static int
poller_run_idle(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 0;
}

static int
poller_run_busy(void *ctx)
{
	uint64_t delay_us = (uint64_t)ctx;

	spdk_delay_us(delay_us);

	return 1;
}

static void
thread_update_stats(void)
{
	struct spdk_poller	*poller;
	struct spdk_thread	*thread;

	MOCK_SET(spdk_get_ticks, 10);

	allocate_threads(1);

	set_thread(0);
	thread = spdk_get_thread();

	CU_ASSERT(thread->tsc_last == 10);
	CU_ASSERT(thread->stats.idle_tsc == 0);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	/* Test that idle_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_idle, (void *)1000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 1110);
	CU_ASSERT(thread->stats.idle_tsc == 1000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_delay_us(100);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 2210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 0);

	spdk_poller_unregister(&poller);

	/* Test that busy_tsc is updated as expected. */
	poller = spdk_poller_register(poller_run_busy, (void *)100000, 0);
	CU_ASSERT(poller != NULL);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 112210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 100000);

	spdk_delay_us(10000);

	poll_thread_times(0, 1);

	CU_ASSERT(thread->tsc_last == 222210);
	CU_ASSERT(thread->stats.idle_tsc == 2000);
	CU_ASSERT(thread->stats.busy_tsc == 200000);

	spdk_poller_unregister(&poller);

	free_threads();
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("io_channel", NULL, NULL);

	CU_add_test(suite, "thread_alloc", thread_alloc);
	CU_add_test(suite, "thread_send_msg", thread_send_msg);
	CU_add_test(suite, "thread_poller", thread_poller);
	CU_add_test(suite, "poller_pause", poller_pause);
	CU_add_test(suite, "thread_for_each", thread_for_each);
	CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove);
	CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg);
	CU_add_test(suite, "thread_name", thread_name);
	CU_add_test(suite, "channel", channel);
	CU_add_test(suite, "channel_destroy_races", channel_destroy_races);
	CU_add_test(suite, "thread_exit", thread_exit);
	CU_add_test(suite, "thread_update_stats", thread_update_stats);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}