xref: /spdk/test/unit/lib/thread/thread.c/thread_ut.c (revision 552e21cce6cccbf833ed9109827e08337377d7ce)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "spdk_internal/thread.h"
39 
40 #include "thread/thread.c"
41 #include "common/lib/ut_multithread.c"
42 
43 static void
44 thread_alloc(void)
45 {
46 	CU_ASSERT(TAILQ_EMPTY(&g_threads));
47 	allocate_threads(1);
48 	CU_ASSERT(!TAILQ_EMPTY(&g_threads));
49 	free_threads();
50 	CU_ASSERT(TAILQ_EMPTY(&g_threads));
51 }
52 
/* Message callback: flags the bool pointed to by ctx as delivered. */
static void
send_msg_cb(void *ctx)
{
	*(bool *)ctx = true;
}
60 
61 static void
62 thread_send_msg(void)
63 {
64 	struct spdk_thread *thread0;
65 	bool done = false;
66 
67 	allocate_threads(2);
68 	set_thread(0);
69 	thread0 = spdk_get_thread();
70 
71 	set_thread(1);
72 	/* Simulate thread 1 sending a message to thread 0. */
73 	spdk_thread_send_msg(thread0, send_msg_cb, &done);
74 
75 	/* We have not polled thread 0 yet, so done should be false. */
76 	CU_ASSERT(!done);
77 
78 	/*
79 	 * Poll thread 1.  The message was sent to thread 0, so this should be
80 	 *  a nop and done should still be false.
81 	 */
82 	poll_thread(1);
83 	CU_ASSERT(!done);
84 
85 	/*
86 	 * Poll thread 0.  This should execute the message and done should then
87 	 *  be true.
88 	 */
89 	poll_thread(0);
90 	CU_ASSERT(done);
91 
92 	free_threads();
93 }
94 
/* Poller callback: records that it fired and returns -1 (no work done). */
static int
poller_run_done(void *ctx)
{
	bool *ran = ctx;

	*ran = true;
	return -1;
}
104 
105 static void
106 thread_poller(void)
107 {
108 	struct spdk_poller	*poller = NULL;
109 	bool			poller_run = false;
110 
111 	allocate_threads(1);
112 
113 	set_thread(0);
114 	MOCK_SET(spdk_get_ticks, 0);
115 	/* Register a poller with no-wait time and test execution */
116 	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
117 	CU_ASSERT(poller != NULL);
118 
119 	poll_threads();
120 	CU_ASSERT(poller_run == true);
121 
122 	spdk_poller_unregister(&poller);
123 	CU_ASSERT(poller == NULL);
124 
125 	/* Register a poller with 1000us wait time and test single execution */
126 	poller_run = false;
127 	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
128 	CU_ASSERT(poller != NULL);
129 
130 	poll_threads();
131 	CU_ASSERT(poller_run == false);
132 
133 	spdk_delay_us(1000);
134 	poll_threads();
135 	CU_ASSERT(poller_run == true);
136 
137 	poller_run = false;
138 	poll_threads();
139 	CU_ASSERT(poller_run == false);
140 
141 	spdk_delay_us(1000);
142 	poll_threads();
143 	CU_ASSERT(poller_run == true);
144 
145 	spdk_poller_unregister(&poller);
146 	CU_ASSERT(poller == NULL);
147 
148 	free_threads();
149 }
150 
/* Per-thread callback: bumps the shared visit counter pointed to by ctx. */
static void
for_each_cb(void *ctx)
{
	int *visits = ctx;

	*visits += 1;
}
158 
159 static void
160 thread_for_each(void)
161 {
162 	int count = 0;
163 	int i;
164 
165 	allocate_threads(3);
166 	set_thread(0);
167 
168 	spdk_for_each_thread(for_each_cb, &count, for_each_cb);
169 
170 	/* We have not polled thread 0 yet, so count should be 0 */
171 	CU_ASSERT(count == 0);
172 
173 	/* Poll each thread to verify the message is passed to each */
174 	for (i = 0; i < 3; i++) {
175 		poll_thread(i);
176 		CU_ASSERT(count == (i + 1));
177 	}
178 
179 	/*
180 	 * After each thread is called, the completion calls it
181 	 * one more time.
182 	 */
183 	poll_thread(0);
184 	CU_ASSERT(count == 4);
185 
186 	free_threads();
187 }
188 
/* Minimal channel create callback: no per-channel state to initialize. */
static int
channel_create(void *io_device, void *ctx_buf)
{
	(void)io_device;
	(void)ctx_buf;

	return 0;
}
194 
/* Channel destroy callback: intentionally a no-op, nothing to tear down. */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
}
199 
/* Per-channel iteration callback: counts the visit in the channel's int
 * context, then advances the iterator to the next channel. */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	int *visit_count = spdk_io_channel_get_ctx(ch);

	*visit_count += 1;

	spdk_for_each_channel_continue(i, 0);
}
210 
/* for_each_channel completion callback: intentionally a no-op. */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
}
215 
/*
 * Verify that a for_each_channel iteration tolerates one of the channels
 * being put (deleted) around the time the iteration starts.
 */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int io_target;
	int count = 0;

	/* One channel to the same io_device on each of three threads. */
	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&io_target);
	set_thread(1);
	ch1 = spdk_get_io_channel(&io_target);
	set_thread(2);
	ch2 = spdk_get_io_channel(&io_target);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	/* Let the deferred put complete before the iteration begins. */
	poll_threads();
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	poll_threads();

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	spdk_put_io_channel(ch0);
	poll_threads();

	/* Release the remaining channels and unregister the device. */
	set_thread(1);
	spdk_put_io_channel(ch1);
	set_thread(2);
	spdk_put_io_channel(ch2);
	spdk_io_device_unregister(&io_target, NULL);
	poll_threads();

	free_threads();
}
265 
/* Context shared by the unregister-while-iterating test callbacks below. */
struct unreg_ctx {
	bool	ch_done;	/* set when the per-channel callback has run */
	bool	foreach_done;	/* set when the foreach completion has run */
};
270 
/*
 * Per-channel callback for the unregister test.  Records that it ran, then
 * continues the iteration.
 */
static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	/* The iterator must still be pinned to a thread mid-iteration. */
	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}
281 
282 static void
283 unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
284 {
285 	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
286 
287 	ctx->foreach_done = true;
288 }
289 
/*
 * Verify io_device unregister semantics while a for_each_channel iteration
 * is outstanding: the unregister is deferred until the foreach completes,
 * and re-registering the same io_device address in the meantime is ignored.
 */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	/* Remember the registered device so we can check it stays in place. */
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	/* Start an iteration; it will not run until the thread is polled. */
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	/* Polling runs both the per-channel callback and the completion. */
	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}
340 
/*
 * Verify thread naming: a thread created without a name gets an
 * auto-generated one, and a thread created with a name keeps it.
 */
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	/* Create thread with no name, which automatically generates one */
	thread = spdk_thread_create(NULL);
	/* Check the creation result before installing it as the current
	 * thread; previously a NULL result would have been passed straight
	 * to spdk_set_thread(). */
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_thread_exit(thread);

	/* Create thread named "test_thread" */
	thread = spdk_thread_create("test_thread");
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_thread_exit(thread);
}
366 
/* Dummy io_device handles; only their addresses are used as device keys. */
static uint64_t device1;
static uint64_t device2;
static uint64_t device3;

/* Sentinel values the create callbacks stamp into each channel's context. */
static uint64_t ctx1 = 0x1111;
static uint64_t ctx2 = 0x2222;

/* Call counters incremented by the create/destroy callbacks below. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;
376 
377 static int
378 create_cb_1(void *io_device, void *ctx_buf)
379 {
380 	CU_ASSERT(io_device == &device1);
381 	*(uint64_t *)ctx_buf = ctx1;
382 	g_create_cb_calls++;
383 	return 0;
384 }
385 
386 static void
387 destroy_cb_1(void *io_device, void *ctx_buf)
388 {
389 	CU_ASSERT(io_device == &device1);
390 	CU_ASSERT(*(uint64_t *)ctx_buf == ctx1);
391 	g_destroy_cb_calls++;
392 }
393 
394 static int
395 create_cb_2(void *io_device, void *ctx_buf)
396 {
397 	CU_ASSERT(io_device == &device2);
398 	*(uint64_t *)ctx_buf = ctx2;
399 	g_create_cb_calls++;
400 	return 0;
401 }
402 
403 static void
404 destroy_cb_2(void *io_device, void *ctx_buf)
405 {
406 	CU_ASSERT(io_device == &device2);
407 	CU_ASSERT(*(uint64_t *)ctx_buf == ctx2);
408 	g_destroy_cb_calls++;
409 }
410 
/*
 * Verify the I/O channel refcount lifecycle: a second get on the same device
 * returns the same channel without a create call, and the destroy callback
 * fires only when the last reference is put.
 */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device1, create_cb_1, destroy_cb_1, sizeof(ctx1), NULL);
	spdk_io_device_register(&device2, create_cb_2, destroy_cb_2, sizeof(ctx2), NULL);

	/* First get on device1 must invoke the create callback. */
	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	/* Second get on the same device returns the same channel, no create. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Putting one of two references must not destroy the channel. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	/* A get on a different device creates a distinct channel. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == ctx2);

	/* Putting the last reference to device1's channel destroys it. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* device3 was never registered, so a get on it must fail. */
	ch1 = spdk_get_io_channel(&device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
469 
470 static int
471 create_cb(void *io_device, void *ctx_buf)
472 {
473 	uint64_t *refcnt = (uint64_t *)ctx_buf;
474 
475 	CU_ASSERT(*refcnt == 0);
476 	*refcnt = 1;
477 
478 	return 0;
479 }
480 
481 static void
482 destroy_cb(void *io_device, void *ctx_buf)
483 {
484 	uint64_t *refcnt = (uint64_t *)ctx_buf;
485 
486 	CU_ASSERT(*refcnt == 1);
487 	*refcnt = 0;
488 }
489 
490 /**
491  * This test is checking that a sequence of get, put, get, put without allowing
492  * the deferred put operation to complete doesn't result in releasing the memory
493  * for the channel twice.
494  */
495 static void
496 channel_destroy_races(void)
497 {
498 	uint64_t device;
499 	struct spdk_io_channel *ch;
500 
501 	allocate_threads(1);
502 	set_thread(0);
503 
504 	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);
505 
506 	ch = spdk_get_io_channel(&device);
507 	SPDK_CU_ASSERT_FATAL(ch != NULL);
508 
509 	spdk_put_io_channel(ch);
510 
511 	ch = spdk_get_io_channel(&device);
512 	SPDK_CU_ASSERT_FATAL(ch != NULL);
513 
514 	spdk_put_io_channel(ch);
515 	poll_threads();
516 
517 	spdk_io_device_unregister(&device, NULL);
518 	poll_threads();
519 
520 	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
521 	free_threads();
522 	CU_ASSERT(TAILQ_EMPTY(&g_threads));
523 }
524 
525 int
526 main(int argc, char **argv)
527 {
528 	CU_pSuite	suite = NULL;
529 	unsigned int	num_failures;
530 
531 	if (CU_initialize_registry() != CUE_SUCCESS) {
532 		return CU_get_error();
533 	}
534 
535 	suite = CU_add_suite("io_channel", NULL, NULL);
536 	if (suite == NULL) {
537 		CU_cleanup_registry();
538 		return CU_get_error();
539 	}
540 
541 	if (
542 		CU_add_test(suite, "thread_alloc", thread_alloc) == NULL ||
543 		CU_add_test(suite, "thread_send_msg", thread_send_msg) == NULL ||
544 		CU_add_test(suite, "thread_poller", thread_poller) == NULL ||
545 		CU_add_test(suite, "thread_for_each", thread_for_each) == NULL ||
546 		CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove) == NULL ||
547 		CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg) == NULL ||
548 		CU_add_test(suite, "thread_name", thread_name) == NULL ||
549 		CU_add_test(suite, "channel", channel) == NULL ||
550 		CU_add_test(suite, "channel_destroy_races", channel_destroy_races) == NULL
551 	) {
552 		CU_cleanup_registry();
553 		return CU_get_error();
554 	}
555 
556 	CU_basic_set_mode(CU_BRM_VERBOSE);
557 	CU_basic_run_tests();
558 	num_failures = CU_get_number_of_failures();
559 	CU_cleanup_registry();
560 	return num_failures;
561 }
562