/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/thread.h"

#include "thread/thread.c"
#include "common/lib/ut_multithread.c"

static int g_sched_rc = 0;

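/* Stub scheduler callback; returns whatever value is currently in g_sched_rc. */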
static int
_thread_schedule(struct spdk_thread *thread)
{
	return g_sched_rc;
}

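/*
 * Verify thread creation both with and without a scheduler callback,
 * including the case where the scheduler rejects the new thread.
 */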
static void
thread_alloc(void)
{
	struct spdk_thread *thread;

	/* No schedule callback */
	spdk_thread_lib_init(NULL, 0);
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_thread_exit(thread);
	spdk_thread_lib_fini();

	/* Schedule callback exists */
	spdk_thread_lib_init(_thread_schedule, 0);

	/* Scheduling succeeds */
	g_sched_rc = 0;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_thread_exit(thread);

	/* Scheduling fails */
	g_sched_rc = -1;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread == NULL);

	spdk_thread_lib_fini();
}

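/* Message callback that simply records that it ran. */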
static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

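/*
 * Verify that a message sent to another thread only executes when the
 * target thread is polled.
 */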
static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1.  The message was sent to thread 0, so this should be
	 *  a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0.  This should execute the message and done should then
	 *  be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}

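/* Poller callback that records that it ran. */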
static int
poller_run_done(void *ctx)
{
	bool	*poller_run = ctx;

	*poller_run = true;

	return -1;
}

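/*
 * Verify poller registration and execution, both for a poller that runs on
 * every poll and for a periodic poller that only runs after its period elapses.
 */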
static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no wait time (period of 0) and verify it runs on the next poll */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with a 1000us period and verify it runs only once per period */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}

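/* Message callback that counts how many times it has been invoked. */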
static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

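/*
 * Verify that spdk_for_each_thread() delivers the message to every thread and
 * then invokes the completion callback on the original thread.
 */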
static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After the message has run on every thread, the completion callback
	 *  (also for_each_cb here) runs once more on the original thread.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}

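/*
 * Minimal I/O channel create/destroy callbacks for the io_devices used by the
 * for_each_channel tests.  channel_msg counts each per-channel invocation and
 * channel_cpl is an empty completion callback.
 */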
static int
channel_create(void *io_device, void *ctx_buf)
{
	return 0;
}

static void
channel_destroy(void *io_device, void *ctx_buf)
{
}

static void
channel_msg(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	int *count = spdk_io_channel_get_ctx(ch);

	(*count)++;

	spdk_for_each_channel_continue(i, 0);
}

static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
}

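/*
 * Verify that spdk_for_each_channel() correctly handles a channel being
 * removed while an iteration is in progress.
 */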
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int io_target;
	int count = 0;

	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&io_target);
	set_thread(1);
	ch1 = spdk_get_io_channel(&io_target);
	set_thread(2);
	ch2 = spdk_get_io_channel(&io_target);

	/*
	 * Test that the io_channel layer handles the case where we start to iterate
	 *  through the channels and one of the channels is deleted during the iteration.
	 * This is exercised in a few different, sometimes non-intuitive orders, because
	 *  some operations are deferred and do not execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	poll_threads();
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	poll_threads();

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	spdk_put_io_channel(ch0);
	poll_threads();

	set_thread(1);
	spdk_put_io_channel(ch1);
	set_thread(2);
	spdk_put_io_channel(ch2);
	spdk_io_device_unregister(&io_target, NULL);
	poll_threads();

	free_threads();
}

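/* Completion flags shared by the for_each_channel_unreg test callbacks. */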
struct unreg_ctx {
	bool	ch_done;
	bool	foreach_done;
};

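/*
 * Per-channel callback for the unregister-during-foreach test; records that it
 * ran and verifies the iterator still has a current thread.
 */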
static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}

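/* Completion callback for the unregister-during-foreach test. */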
static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

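/*
 * Verify that unregistering an io_device is deferred while a for_each_channel
 * iteration on it is still outstanding, and that re-registering at the same
 * address in that window does not add a duplicate device.
 */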
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

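/* Verify both auto-generated and explicitly provided thread names. */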
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	spdk_thread_lib_init(NULL, 0);

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread", NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);

	spdk_thread_lib_fini();
}

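/* Dummy io_device handles, per-channel context values, and callback counters for the channel() test. */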
static uint64_t device1;
static uint64_t device2;
static uint64_t device3;

static uint64_t ctx1 = 0x1111;
static uint64_t ctx2 = 0x2222;

static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

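/* Create/destroy callbacks for device1 and device2; each call is counted. */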
static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device1);
	*(uint64_t *)ctx_buf = ctx1;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == ctx1);
	g_destroy_cb_calls++;
}

static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device2);
	*(uint64_t *)ctx_buf = ctx2;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == ctx2);
	g_destroy_cb_calls++;
}

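/*
 * Verify I/O channel reference counting: repeated gets on the same device
 * return the same channel, the destroy callback only runs when the last
 * reference is put, and getting a channel for an unregistered device fails.
 */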
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device1, create_cb_1, destroy_cb_1, sizeof(ctx1), NULL);
	spdk_io_device_register(&device2, create_cb_2, destroy_cb_2, sizeof(ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	ch1 = spdk_get_io_channel(&device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

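/*
 * Create/destroy callbacks that track the channel context as a simple
 * refcount, used by channel_destroy_races to detect a double destroy.
 */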
static int
create_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 0);
	*refcnt = 1;

	return 0;
}

static void
destroy_cb(void *io_device, void *ctx_buf)
{
	uint64_t *refcnt = (uint64_t *)ctx_buf;

	CU_ASSERT(*refcnt == 1);
	*refcnt = 0;
}

/**
 * Verify that a sequence of get, put, get, put, without allowing the deferred
 * put operation to complete in between, does not release the memory for the
 * channel twice.
 */
static void
channel_destroy_races(void)
{
	uint64_t device;
	struct spdk_io_channel *ch;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);

	ch = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	spdk_put_io_channel(ch);
	poll_threads();

	spdk_io_device_unregister(&device, NULL);
	poll_threads();

	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

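/* Register the CUnit suite, add each test, and run them. */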
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("io_channel", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "thread_alloc", thread_alloc) == NULL ||
		CU_add_test(suite, "thread_send_msg", thread_send_msg) == NULL ||
		CU_add_test(suite, "thread_poller", thread_poller) == NULL ||
		CU_add_test(suite, "thread_for_each", thread_for_each) == NULL ||
		CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove) == NULL ||
		CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg) == NULL ||
		CU_add_test(suite, "thread_name", thread_name) == NULL ||
		CU_add_test(suite, "channel", channel) == NULL ||
		CU_add_test(suite, "channel_destroy_races", channel_destroy_races) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}