xref: /spdk/test/unit/lib/thread/thread.c/thread_ut.c (revision 9889ab2dc80e40dae92dcef361d53dcba722043d)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "spdk_internal/thread.h"
39 
40 #include "thread/thread.c"
41 #include "common/lib/ut_multithread.c"
42 
/* Return value that the stub scheduler below will report; primed by tests. */
static int g_sched_rc = 0;

/*
 * Stub schedule callback for spdk_thread_lib_init().  Ignores the thread and
 * returns whatever result the test primed in g_sched_rc (0 = success).
 */
static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}
50 
/*
 * Verify thread creation and teardown, both without a schedule callback and
 * with one, including the case where the callback reports failure (in which
 * case spdk_thread_create() must return NULL).
 */
static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}
83 
/* Message handler used by thread_send_msg(): sets the bool flag it is given. */
static void
send_msg_cb(void *ctx)
{
	*(bool *)ctx = true;
}
91 
/*
 * Verify that a message sent from one thread to another is executed only
 * when the *target* thread is polled.
 */
static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}
125 
/* Poller function: records that it executed by setting the bool it was
 * registered with, then returns -1. */
static int
poller_run_done(void *ctx)
{
	*(bool *)ctx = true;

	return -1;
}
135 
/*
 * Verify poller registration and execution: a zero-period poller fires on
 * every poll, while a 1000us poller fires only after the mocked tick count
 * has advanced by a full period.
 */
static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	/* Start the mocked clock at 0 so delays are fully controlled below. */
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	/* Period has not elapsed yet, so the poller must not fire. */
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	/* Without another delay, polling again must not re-fire the poller. */
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}
181 
/* Per-thread message handler for spdk_for_each_thread(): bumps the counter. */
static void
for_each_cb(void *ctx)
{
	int *counter = ctx;

	*counter += 1;
}
189 
/*
 * Verify spdk_for_each_thread(): the message runs once on each of the three
 * threads as each is polled, then the completion callback (the same function
 * here) runs once more on the originating thread.
 */
static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After each thread is called, the completion calls it
	 * one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}
219 
/* Minimal io_device create callback: no per-channel setup, always succeeds. */
static int
channel_create(void *io_device, void *ctx_buf)
{
	return 0;
}
225 
/* Minimal io_device destroy callback: nothing to tear down. */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
}
230 
/*
 * Per-channel callback for spdk_for_each_channel(): increments the int
 * stored in the visited channel's own context buffer (not the iteration
 * context), then continues the iteration with status 0.
 */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	int *count = spdk_io_channel_get_ctx(ch);

	(*count)++;

	spdk_for_each_channel_continue(i, 0);
}
241 
/* Completion callback for spdk_for_each_channel(): intentionally a no-op. */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
}
246 
/*
 * Verify that spdk_for_each_channel() tolerates a channel being released
 * around (before or during) the iteration.
 */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int io_target;
	int count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&io_target);
	set_thread(1);
	ch1 = spdk_get_io_channel(&io_target);
	set_thread(2);
	ch2 = spdk_get_io_channel(&io_target);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	poll_threads();
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	poll_threads();

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	spdk_put_io_channel(ch0);
	poll_threads();

	/* Release the remaining channels and tear everything down. */
	set_thread(1);
	spdk_put_io_channel(ch1);
	set_thread(2);
	spdk_put_io_channel(ch2);
	spdk_io_device_unregister(&io_target, NULL);
	poll_threads();

	free_threads();
}
296 
/* Shared state between unreg_ch_done() and unreg_foreach_done(). */
struct unreg_ctx {
	bool	ch_done;	/* per-channel callback executed */
	bool	foreach_done;	/* foreach completion callback executed */
};
301 
302 static void
303 unreg_ch_done(struct spdk_io_channel_iter *i)
304 {
305 	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
306 
307 	ctx->ch_done = true;
308 
309 	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
310 	spdk_for_each_channel_continue(i, 0);
311 }
312 
313 static void
314 unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
315 {
316 	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
317 
318 	ctx->foreach_done = true;
319 }
320 
/*
 * Verify io_device unregister semantics while a foreach is outstanding:
 * the unregister is deferred until the foreach completes, and a duplicate
 * register at the same address does not add a second device.
 */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}
371 
/*
 * Verify thread naming: an auto-generated name when none is supplied, and
 * an exact match when an explicit name is supplied.
 *
 * Fix: the original passed the spdk_thread_create() result to
 * spdk_set_thread() before checking it for NULL; assert immediately after
 * each create so a failed allocation cannot propagate.
 */
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();
}
403 
/* Distinct addresses used purely as io_device identifiers. */
static uint64_t device1;
static uint64_t device2;
static uint64_t device3;

/* Sentinel values stamped into each device's per-channel context. */
static uint64_t ctx1 = 0x1111;
static uint64_t ctx2 = 0x2222;

/* Counters incremented by the create/destroy callbacks below. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;
413 
414 static int
415 create_cb_1(void *io_device, void *ctx_buf)
416 {
417 	CU_ASSERT(io_device == &device1);
418 	*(uint64_t *)ctx_buf = ctx1;
419 	g_create_cb_calls++;
420 	return 0;
421 }
422 
423 static void
424 destroy_cb_1(void *io_device, void *ctx_buf)
425 {
426 	CU_ASSERT(io_device == &device1);
427 	CU_ASSERT(*(uint64_t *)ctx_buf == ctx1);
428 	g_destroy_cb_calls++;
429 }
430 
431 static int
432 create_cb_2(void *io_device, void *ctx_buf)
433 {
434 	CU_ASSERT(io_device == &device2);
435 	*(uint64_t *)ctx_buf = ctx2;
436 	g_create_cb_calls++;
437 	return 0;
438 }
439 
440 static void
441 destroy_cb_2(void *io_device, void *ctx_buf)
442 {
443 	CU_ASSERT(io_device == &device2);
444 	CU_ASSERT(*(uint64_t *)ctx_buf == ctx2);
445 	g_destroy_cb_calls++;
446 }
447 
/*
 * Verify basic io_channel reference counting: repeated gets on one thread
 * share a channel, destroy runs only when the last reference is put, and
 * a get on an unregistered device fails.
 */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device1, create_cb_1, destroy_cb_1, sizeof(ctx1), NULL);
	spdk_io_device_register(&device2, create_cb_2, destroy_cb_2, sizeof(ctx2), NULL);

	/* First get on device1 must invoke the create callback exactly once. */
	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	/* Second get on the same thread returns the same channel, no create. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* One reference remains (ch1), so destroy must not run yet. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	/* device2 gets its own, distinct channel. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == ctx2);

	/* Dropping the last reference to device1's channel triggers destroy. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* device3 was never registered, so the lookup must fail. */
	ch1 = spdk_get_io_channel(&device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
506 
507 static int
508 create_cb(void *io_device, void *ctx_buf)
509 {
510 	uint64_t *refcnt = (uint64_t *)ctx_buf;
511 
512 	CU_ASSERT(*refcnt == 0);
513 	*refcnt = 1;
514 
515 	return 0;
516 }
517 
518 static void
519 destroy_cb(void *io_device, void *ctx_buf)
520 {
521 	uint64_t *refcnt = (uint64_t *)ctx_buf;
522 
523 	CU_ASSERT(*refcnt == 1);
524 	*refcnt = 0;
525 }
526 
527 /**
528  * This test is checking that a sequence of get, put, get, put without allowing
529  * the deferred put operation to complete doesn't result in releasing the memory
530  * for the channel twice.
531  */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	/* Put without polling, so the destroy is still deferred... */
	spdk_put_io_channel(ch);

	/* ...then get again before the deferred put has run. */
	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	/* The create/destroy callbacks assert the refcount transitions,
	 * catching a double release. */
	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
561 
/*
 * Test runner: registers all tests in the "io_channel" suite, runs them in
 * verbose mode, and returns the number of CUnit failures as the exit code.
 */
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("io_channel", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "thread_alloc", thread_alloc) == NULL ||
		CU_add_test(suite, "thread_send_msg", thread_send_msg) == NULL ||
		CU_add_test(suite, "thread_poller", thread_poller) == NULL ||
		CU_add_test(suite, "thread_for_each", thread_for_each) == NULL ||
		CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove) == NULL ||
		CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg) == NULL ||
		CU_add_test(suite, "thread_name", thread_name) == NULL ||
		CU_add_test(suite, "channel", channel) == NULL ||
		CU_add_test(suite, "channel_destroy_races", channel_destroy_races) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
599