xref: /spdk/test/unit/lib/thread/thread.c/thread_ut.c (revision 9c2aea2ad581e78982146835ecab3b12107d25d6)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "thread/thread.c"
39 #include "common/lib/ut_multithread.c"
40 
41 static void
42 thread_alloc(void)
43 {
44 	CU_ASSERT(TAILQ_EMPTY(&g_threads));
45 	allocate_threads(1);
46 	CU_ASSERT(!TAILQ_EMPTY(&g_threads));
47 	free_threads();
48 	CU_ASSERT(TAILQ_EMPTY(&g_threads));
49 }
50 
/* Message handler used by thread_send_msg(): flags the bool pointed to by
 * ctx so the test can observe that the message actually executed. */
static void
send_msg_cb(void *ctx)
{
	bool *delivered = ctx;

	*delivered = true;
}
58 
59 static void
60 thread_send_msg(void)
61 {
62 	struct spdk_thread *thread0;
63 	bool done = false;
64 
65 	allocate_threads(2);
66 	set_thread(0);
67 	thread0 = spdk_get_thread();
68 
69 	set_thread(1);
70 	/* Simulate thread 1 sending a message to thread 0. */
71 	spdk_thread_send_msg(thread0, send_msg_cb, &done);
72 
73 	/* We have not polled thread 0 yet, so done should be false. */
74 	CU_ASSERT(!done);
75 
76 	/*
77 	 * Poll thread 1.  The message was sent to thread 0, so this should be
78 	 *  a nop and done should still be false.
79 	 */
80 	poll_thread(1);
81 	CU_ASSERT(!done);
82 
83 	/*
84 	 * Poll thread 0.  This should execute the message and done should then
85 	 *  be true.
86 	 */
87 	poll_thread(0);
88 	CU_ASSERT(done);
89 
90 	free_threads();
91 }
92 
/* Poller callback: records that it ran via ctx and returns -1. */
static int
poller_run_done(void *ctx)
{
	bool *ran_flag = ctx;

	*ran_flag = true;

	return -1;
}
102 
/*
 * Verify poller scheduling: a zero-period poller fires on every poll,
 * while a timed poller fires only after its full period has elapsed
 * (measured against the mocked tick counter).
 */
static void
thread_poller(void)
{
	struct spdk_poller	*poller = NULL;
	bool			poller_run = false;

	allocate_threads(1);

	set_thread(0);
	/* Pin the mocked tick source to 0 so timed-poller math is deterministic. */
	MOCK_SET(spdk_get_ticks, 0);
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	/* No time has passed yet, so the timed poller must not fire. */
	poll_threads();
	CU_ASSERT(poller_run == false);

	/* Advance exactly one period; now it must fire once. */
	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	/* Polling again without advancing time must not re-run it... */
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	/* ...until another full period elapses. */
	spdk_delay_us(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}
148 
/* Callback for spdk_for_each_thread(): bumps the counter passed as ctx. */
static void
for_each_cb(void *ctx)
{
	int *counter = ctx;

	*counter += 1;
}
156 
157 static void
158 thread_for_each(void)
159 {
160 	int count = 0;
161 	int i;
162 
163 	allocate_threads(3);
164 	set_thread(0);
165 
166 	spdk_for_each_thread(for_each_cb, &count, for_each_cb);
167 
168 	/* We have not polled thread 0 yet, so count should be 0 */
169 	CU_ASSERT(count == 0);
170 
171 	/* Poll each thread to verify the message is passed to each */
172 	for (i = 0; i < 3; i++) {
173 		poll_thread(i);
174 		CU_ASSERT(count == (i + 1));
175 	}
176 
177 	/*
178 	 * After each thread is called, the completion calls it
179 	 * one more time.
180 	 */
181 	poll_thread(0);
182 	CU_ASSERT(count == 4);
183 
184 	free_threads();
185 }
186 
/* Minimal io_device create callback: no per-channel setup needed. */
static int
channel_create(void *io_device, void *ctx_buf)
{
	return 0;
}
192 
/* Minimal io_device destroy callback: nothing to tear down. */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
}
197 
/* Per-channel step for spdk_for_each_channel(): increments the int stored
 * in the channel's own context buffer, then advances the iteration. */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	int *ch_counter = spdk_io_channel_get_ctx(ch);

	*ch_counter += 1;

	spdk_for_each_channel_continue(i, 0);
}
208 
/* Completion callback for spdk_for_each_channel(): intentionally a no-op. */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
}
213 
/*
 * Verify that channel iteration tolerates a channel being put around the
 * time an iteration starts.  Puts and iteration steps are deferred until
 * threads are polled, so several orderings are exercised.
 */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int io_target;
	int count = 0;

	/* One channel to the same io_device from each of three threads. */
	allocate_threads(3);
	set_thread(0);
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	ch0 = spdk_get_io_channel(&io_target);
	set_thread(1);
	ch1 = spdk_get_io_channel(&io_target);
	set_thread(2);
	ch2 = spdk_get_io_channel(&io_target);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 *  the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 *  some operations are deferred and won't execute until their threads are
	 *  polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	poll_threads();
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	poll_threads();

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 *  thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	spdk_put_io_channel(ch0);
	poll_threads();

	/* Release the remaining channels and tear the device down. */
	set_thread(1);
	spdk_put_io_channel(ch1);
	set_thread(2);
	spdk_put_io_channel(ch2);
	spdk_io_device_unregister(&io_target, NULL);
	poll_threads();

	free_threads();
}
263 
/* Shared state observed by the for_each_channel_unreg test. */
struct unreg_ctx {
	bool	ch_done;	/* set once the per-channel callback has run */
	bool	foreach_done;	/* set once the iteration completion has run */
};
268 
269 static void
270 unreg_ch_done(struct spdk_io_channel_iter *i)
271 {
272 	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
273 
274 	ctx->ch_done = true;
275 
276 	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
277 	spdk_for_each_channel_continue(i, 0);
278 }
279 
280 static void
281 unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
282 {
283 	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
284 
285 	ctx->foreach_done = true;
286 }
287 
/*
 * Verify that spdk_io_device_unregister() is deferred while a
 * spdk_for_each_channel() iteration is outstanding on the device, and
 * that re-registering the same address in that window adds no new entry.
 */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target;

	allocate_threads(1);
	set_thread(0);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	ch0 = spdk_get_io_channel(&io_target);
	/* Start an iteration; it will not progress until the thread is polled. */
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 *  have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 *  have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	/* Polling runs the per-channel callback and then the completion. */
	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 *  even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}
338 
/*
 * Verify thread naming: a name is auto-generated when none is supplied,
 * and an explicit name is preserved verbatim.
 */
static void
thread_name(void)
{
	struct spdk_thread *th;
	const char *tname;

	/* No name supplied - one must be generated automatically. */
	spdk_allocate_thread(NULL, NULL, NULL, NULL, NULL);
	th = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(th != NULL);
	tname = spdk_thread_get_name(th);
	CU_ASSERT(tname != NULL);
	spdk_free_thread();

	/* An explicit name must come back exactly as given. */
	spdk_allocate_thread(NULL, NULL, NULL, NULL, "test_thread");
	th = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(th != NULL);
	tname = spdk_thread_get_name(th);
	SPDK_CU_ASSERT_FATAL(tname != NULL);
	CU_ASSERT(strcmp(tname, "test_thread") == 0);
	spdk_free_thread();
}
362 
/* Distinct addresses used purely as io_device identities; the values are
 * never read.  device3 is deliberately never registered. */
static uint64_t device1;
static uint64_t device2;
static uint64_t device3;

/* Sentinel values the create callbacks write into each channel context. */
static uint64_t ctx1 = 0x1111;
static uint64_t ctx2 = 0x2222;

/* Invocation counters bumped by the create/destroy callbacks below. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;
372 
373 static int
374 create_cb_1(void *io_device, void *ctx_buf)
375 {
376 	CU_ASSERT(io_device == &device1);
377 	*(uint64_t *)ctx_buf = ctx1;
378 	g_create_cb_calls++;
379 	return 0;
380 }
381 
382 static void
383 destroy_cb_1(void *io_device, void *ctx_buf)
384 {
385 	CU_ASSERT(io_device == &device1);
386 	CU_ASSERT(*(uint64_t *)ctx_buf == ctx1);
387 	g_destroy_cb_calls++;
388 }
389 
390 static int
391 create_cb_2(void *io_device, void *ctx_buf)
392 {
393 	CU_ASSERT(io_device == &device2);
394 	*(uint64_t *)ctx_buf = ctx2;
395 	g_create_cb_calls++;
396 	return 0;
397 }
398 
399 static void
400 destroy_cb_2(void *io_device, void *ctx_buf)
401 {
402 	CU_ASSERT(io_device == &device2);
403 	CU_ASSERT(*(uint64_t *)ctx_buf == ctx2);
404 	g_destroy_cb_calls++;
405 }
406 
/*
 * Verify I/O channel reference counting across two io_devices: repeated
 * gets share one channel, destroy runs only on the last put, and a get on
 * an unregistered device fails.
 */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	allocate_threads(1);
	set_thread(0);

	spdk_io_device_register(&device1, create_cb_1, destroy_cb_1, sizeof(ctx1), NULL);
	spdk_io_device_register(&device2, create_cb_2, destroy_cb_2, sizeof(ctx2), NULL);

	/* First get on device1 must invoke the create callback. */
	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	/* A second get returns the same channel without a new create. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Putting one of the two references must not destroy the channel. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 0);

	/* A different device yields a distinct channel. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* The context buffer was initialized by create_cb_2. */
	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == ctx2);

	/* Dropping the last reference to device1's channel destroys it. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* Same for device2's only channel. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	poll_threads();
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* device3 was never registered, so a get must fail. */
	ch1 = spdk_get_io_channel(&device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&device1, NULL);
	poll_threads();
	spdk_io_device_unregister(&device2, NULL);
	poll_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
465 
466 static int
467 create_cb(void *io_device, void *ctx_buf)
468 {
469 	uint64_t *refcnt = (uint64_t *)ctx_buf;
470 
471 	CU_ASSERT(*refcnt == 0);
472 	*refcnt = 1;
473 
474 	return 0;
475 }
476 
477 static void
478 destroy_cb(void *io_device, void *ctx_buf)
479 {
480 	uint64_t *refcnt = (uint64_t *)ctx_buf;
481 
482 	CU_ASSERT(*refcnt == 1);
483 	*refcnt = 0;
484 }
485 
486 /**
487  * This test is checking that a sequence of get, put, get, put without allowing
488  * the deferred put operation to complete doesn't result in releasing the memory
489  * for the channel twice.
490  */
491 static void
492 channel_destroy_races(void)
493 {
494 	uint64_t device;
495 	struct spdk_io_channel *ch;
496 
497 	allocate_threads(1);
498 	set_thread(0);
499 
500 	spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL);
501 
502 	ch = spdk_get_io_channel(&device);
503 	SPDK_CU_ASSERT_FATAL(ch != NULL);
504 
505 	spdk_put_io_channel(ch);
506 
507 	ch = spdk_get_io_channel(&device);
508 	SPDK_CU_ASSERT_FATAL(ch != NULL);
509 
510 	spdk_put_io_channel(ch);
511 	poll_threads();
512 
513 	spdk_io_device_unregister(&device, NULL);
514 	poll_threads();
515 
516 	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
517 	free_threads();
518 	CU_ASSERT(TAILQ_EMPTY(&g_threads));
519 }
520 
521 int
522 main(int argc, char **argv)
523 {
524 	CU_pSuite	suite = NULL;
525 	unsigned int	num_failures;
526 
527 	if (CU_initialize_registry() != CUE_SUCCESS) {
528 		return CU_get_error();
529 	}
530 
531 	suite = CU_add_suite("io_channel", NULL, NULL);
532 	if (suite == NULL) {
533 		CU_cleanup_registry();
534 		return CU_get_error();
535 	}
536 
537 	if (
538 		CU_add_test(suite, "thread_alloc", thread_alloc) == NULL ||
539 		CU_add_test(suite, "thread_send_msg", thread_send_msg) == NULL ||
540 		CU_add_test(suite, "thread_poller", thread_poller) == NULL ||
541 		CU_add_test(suite, "thread_for_each", thread_for_each) == NULL ||
542 		CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove) == NULL ||
543 		CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg) == NULL ||
544 		CU_add_test(suite, "thread_name", thread_name) == NULL ||
545 		CU_add_test(suite, "channel", channel) == NULL ||
546 		CU_add_test(suite, "channel_destroy_races", channel_destroy_races) == NULL
547 	) {
548 		CU_cleanup_registry();
549 		return CU_get_error();
550 	}
551 
552 	CU_basic_set_mode(CU_BRM_VERBOSE);
553 	CU_basic_run_tests();
554 	num_failures = CU_get_number_of_failures();
555 	CU_cleanup_registry();
556 	return num_failures;
557 }
558