xref: /netbsd-src/external/bsd/libevent/dist/test/regress_thread.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: regress_thread.c,v 1.5 2017/01/31 23:17:40 christos Exp $	*/
2 /*
3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 #include "util-internal.h"
28 
29 /* The old tests here need assertions to work. */
30 #undef NDEBUG
31 
32 #include "event2/event-config.h"
33 #include <sys/cdefs.h>
34 __RCSID("$NetBSD: regress_thread.c,v 1.5 2017/01/31 23:17:40 christos Exp $");
35 
36 #include <sys/types.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #ifdef EVENT__HAVE_UNISTD_H
41 #include <unistd.h>
42 #endif
43 #ifdef EVENT__HAVE_SYS_WAIT_H
44 #include <sys/wait.h>
45 #endif
46 
47 #ifdef EVENT__HAVE_PTHREADS
48 #include <pthread.h>
49 #elif defined(_WIN32)
50 #include <process.h>
51 #endif
52 #include <assert.h>
53 #ifdef EVENT__HAVE_UNISTD_H
54 #include <unistd.h>
55 #endif
56 #include <time.h>
57 
58 #include "sys/queue.h"
59 
60 #include "event2/event.h"
61 #include "event2/event_struct.h"
62 #include "event2/thread.h"
63 #include "event2/util.h"
64 #include "evthread-internal.h"
65 #include "event-internal.h"
66 #include "defer-internal.h"
67 #include "regress.h"
68 #include "tinytest_macros.h"
69 #include "time-internal.h"
70 #include "regress_thread.h"
71 
/* A lock/condition-variable pair, passed as the callback argument to the
 * wake_*_timeout timer callbacks below.  Both members are opaque handles
 * allocated with EVTHREAD_ALLOC_LOCK / EVTHREAD_ALLOC_COND. */
struct cond_wait {
	void *lock;	/* protects 'cond'; must be held around wait/signal */
	void *cond;	/* condition variable the worker threads block on */
};
76 
77 static void
78 wake_all_timeout(evutil_socket_t fd, short what, void *arg)
79 {
80 	struct cond_wait *cw = arg;
81 	EVLOCK_LOCK(cw->lock, 0);
82 	EVTHREAD_COND_BROADCAST(cw->cond);
83 	EVLOCK_UNLOCK(cw->lock, 0);
84 
85 }
86 
#if 0
/* Timer callback: signal (wake exactly one waiter on) the cond_wait passed
 * via 'arg'.  Compiled out along with thread_conditions_simple, its only
 * user (see the matching "#if 0" block below). */
static void
wake_one_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct cond_wait *cw = arg;
	EVLOCK_LOCK(cw->lock, 0);
	EVTHREAD_COND_SIGNAL(cw->cond);
	EVLOCK_UNLOCK(cw->lock, 0);
}
#endif
97 
/* Number of worker threads thread_basic launches, and how many timeouts
 * each worker waits through before exiting. */
#define NUM_THREADS	100
#define NUM_ITERATIONS  100
/* Lock protecting 'count'; allocated and freed by thread_basic. */
void *count_lock;
/* Total number of timeouts fired across all basic_thread workers. */
static int count;
102 
/* Worker body for the "basic"/"forking" tests: repeatedly schedule a 3 ms
 * timer on the shared base (from this non-loop thread) and block on a
 * private condition variable until wake_all_timeout broadcasts it.  Each
 * wakeup bumps the global 'count'; the last worker to reach the target
 * total asks the base to exit its loop. */
static THREAD_FN
basic_thread(void *arg)
{
	struct cond_wait cw;
	struct event_base *base = arg;
	struct event ev;
	int i = 0;

	EVTHREAD_ALLOC_LOCK(cw.lock, 0);
	EVTHREAD_ALLOC_COND(cw.cond);
	assert(cw.lock);
	assert(cw.cond);

	evtimer_assign(&ev, base, wake_all_timeout, &cw);
	for (i = 0; i < NUM_ITERATIONS; i++) {
		struct timeval tv;
		evutil_timerclear(&tv);
		tv.tv_sec = 0;
		tv.tv_usec = 3000;

		/* Hold cw.lock across both the timer-add and the wait:
		 * we need to make sure that event does not happen before
		 * we get to wait on the conditional variable. */
		EVLOCK_LOCK(cw.lock, 0);
		assert(evtimer_add(&ev, &tv) == 0);

		assert(EVTHREAD_COND_WAIT(cw.cond, cw.lock) == 0);
		EVLOCK_UNLOCK(cw.lock, 0);

		EVLOCK_LOCK(count_lock, 0);
		++count;
		EVLOCK_UNLOCK(count_lock, 0);
	}

	/* exit the loop only if all threads fired all timeouts */
	EVLOCK_LOCK(count_lock, 0);
	if (count >= NUM_THREADS * NUM_ITERATIONS)
		event_base_loopexit(base, NULL);
	EVLOCK_UNLOCK(count_lock, 0);

	EVTHREAD_FREE_LOCK(cw.lock, 0);
	EVTHREAD_FREE_COND(cw.cond);

	THREAD_RETURN();
}
147 
/* Counts reads on the base's internal th_notify_fd; the forking test
 * expects this to stay 0 in the parent. */
static int notification_fd_used = 0;
#ifndef _WIN32
/* Set when the SIGCHLD from the forked child is delivered. */
static int got_sigchld = 0;
/* SIGCHLD handler for the forking variant: note the signal, then give the
 * base 100 ms to drain before exiting its loop. */
static void
sigchld_cb(evutil_socket_t fd, short event, void *arg)
{
	struct timeval tv;
	struct event_base *base = arg;

	got_sigchld++;
	tv.tv_usec = 100000;
	tv.tv_sec = 0;
	event_base_loopexit(base, &tv);
}


/* Read-event callback attached to the base's th_notify_fd; only tallies
 * how often the notification pipe became readable. */
static void
notify_fd_cb(evutil_socket_t fd, short event, void *arg)
{
	++notification_fd_used;
}
#endif
170 
171 static void
172 thread_basic(void *arg)
173 {
174 	THREAD_T threads[NUM_THREADS];
175 	struct event ev;
176 	struct timeval tv;
177 	int i;
178 	struct basic_test_data *data = arg;
179 	struct event_base *base = data->base;
180 
181 	struct event *notification_event = NULL;
182 	struct event *sigchld_event = NULL;
183 
184 	EVTHREAD_ALLOC_LOCK(count_lock, 0);
185 	tt_assert(count_lock);
186 
187 	tt_assert(base);
188 	if (evthread_make_base_notifiable(base)<0) {
189 		tt_abort_msg("Couldn't make base notifiable!");
190 	}
191 
192 #ifndef _WIN32
193 	if (data->setup_data && !strcmp(data->setup_data, "forking")) {
194 		pid_t pid;
195 		int status;
196 		sigchld_event = evsignal_new(base, SIGCHLD, sigchld_cb, base);
197 		/* This piggybacks on the th_notify_fd weirdly, and looks
198 		 * inside libevent internals.  Not a good idea in non-testing
199 		 * code! */
200 		notification_event = event_new(base,
201 		    base->th_notify_fd[0], EV_READ|EV_PERSIST, notify_fd_cb,
202 		    NULL);
203 		event_add(sigchld_event, NULL);
204 		event_add(notification_event, NULL);
205 
206 		if ((pid = fork()) == 0) {
207 			event_del(notification_event);
208 			if (event_reinit(base) < 0) {
209 				TT_FAIL(("reinit"));
210 				exit(1);
211 			}
212 			event_assign(notification_event, base,
213 			    base->th_notify_fd[0], EV_READ|EV_PERSIST,
214 			    notify_fd_cb, NULL);
215 			event_add(notification_event, NULL);
216 	 		goto child;
217 		}
218 
219 		event_base_dispatch(base);
220 
221 		if (waitpid(pid, &status, 0) == -1)
222 			tt_abort_perror("waitpid");
223 		TT_BLATHER(("Waitpid okay\n"));
224 
225 		tt_assert(got_sigchld);
226 		tt_int_op(notification_fd_used, ==, 0);
227 
228 		goto end;
229 	}
230 
231 child:
232 #endif
233 	for (i = 0; i < NUM_THREADS; ++i)
234 		THREAD_START(threads[i], basic_thread, base);
235 
236 	evtimer_assign(&ev, base, NULL, NULL);
237 	evutil_timerclear(&tv);
238 	tv.tv_sec = 1000;
239 	event_add(&ev, &tv);
240 
241 	event_base_dispatch(base);
242 
243 	for (i = 0; i < NUM_THREADS; ++i)
244 		THREAD_JOIN(threads[i]);
245 
246 	event_del(&ev);
247 
248 	tt_int_op(count, ==, NUM_THREADS * NUM_ITERATIONS);
249 
250 	EVTHREAD_FREE_LOCK(count_lock, 0);
251 
252 	TT_BLATHER(("notifiations==%d", notification_fd_used));
253 
254 end:
255 
256 	if (notification_event)
257 		event_free(notification_event);
258 	if (sigchld_event)
259 		event_free(sigchld_event);
260 }
261 
/* The condition-variable tests below use a smaller thread pool. */
#undef NUM_THREADS
#define NUM_THREADS 10

/* Per-thread record for wait_for_condition: which cond_wait to block on,
 * an optional timeout, and what happened. */
struct alerted_record {
	struct cond_wait *cond;		/* shared condition to wait on */
	struct timeval delay;		/* if nonzero, timed wait bound */
	struct timeval alerted_at;	/* when the wait returned */
	int timed_out;			/* set if the timed wait expired */
};
271 
#if 0
/* (Compiled out together with thread_conditions_simple below.)
 * Thread body: block on rec->cond, using a timed wait when rec->delay is
 * set, then record the wakeup time and whether the wait timed out
 * (EVTHREAD_COND_WAIT_TIMED returns 1 on timeout). */
static THREAD_FN
wait_for_condition(void *arg)
{
	struct alerted_record *rec = arg;
	int r;

	EVLOCK_LOCK(rec->cond->lock, 0);
	if (rec->delay.tv_sec || rec->delay.tv_usec) {
		r = EVTHREAD_COND_WAIT_TIMED(rec->cond->cond, rec->cond->lock,
		    &rec->delay);
	} else {
		r = EVTHREAD_COND_WAIT(rec->cond->cond, rec->cond->lock);
	}
	EVLOCK_UNLOCK(rec->cond->lock, 0);

	evutil_gettimeofday(&rec->alerted_at, NULL);
	if (r == 1)
		rec->timed_out = 1;

	THREAD_RETURN();
}
294 
/* (Compiled out.)  Condition-variable scenario test: launch NUM_THREADS
 * waiters on one shared cond_wait; two of them (indices 5 and 6) use a
 * 150 ms timed wait.  A timer signals one waiter at 30 ms and broadcasts
 * to the rest at 500 ms, then the wakeup times are classified against
 * those deadlines. */
static void
thread_conditions_simple(void *arg)
{
	struct timeval tv_signal, tv_timeout, tv_broadcast;
	struct alerted_record alerted[NUM_THREADS];
	THREAD_T threads[NUM_THREADS];
	struct cond_wait cond;
	int i;
	struct timeval launched_at;
	struct event wake_one;
	struct event wake_all;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	int n_timed_out=0, n_signal=0, n_broadcast=0;

	tv_signal.tv_sec = tv_timeout.tv_sec = tv_broadcast.tv_sec = 0;
	tv_signal.tv_usec = 30*1000;
	tv_timeout.tv_usec = 150*1000;
	tv_broadcast.tv_usec = 500*1000;

	EVTHREAD_ALLOC_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_ALLOC_COND(cond.cond);
	tt_assert(cond.lock);
	tt_assert(cond.cond);
	for (i = 0; i < NUM_THREADS; ++i) {
		memset(&alerted[i], 0, sizeof(struct alerted_record));
		alerted[i].cond = &cond;
	}

	/* Threads 5 and 6 will be allowed to time out */
	memcpy(&alerted[5].delay, &tv_timeout, sizeof(tv_timeout));
	memcpy(&alerted[6].delay, &tv_timeout, sizeof(tv_timeout));

	evtimer_assign(&wake_one, base, wake_one_timeout, &cond);
	evtimer_assign(&wake_all, base, wake_all_timeout, &cond);

	evutil_gettimeofday(&launched_at, NULL);

	/* Launch the threads... */
	for (i = 0; i < NUM_THREADS; ++i) {
		THREAD_START(threads[i], wait_for_condition, &alerted[i]);
	}

	/* Start the timers... */
	tt_int_op(event_add(&wake_one, &tv_signal), ==, 0);
	tt_int_op(event_add(&wake_all, &tv_broadcast), ==, 0);

	/* And run for a bit... */
	event_base_dispatch(base);

	/* And wait till the threads are done. */
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	/* Now, let's see what happened. At least one of 5 or 6 should
	 * have timed out. */
	n_timed_out = alerted[5].timed_out + alerted[6].timed_out;
	tt_int_op(n_timed_out, >=, 1);
	tt_int_op(n_timed_out, <=, 2);

	/* Classify every other wakeup as "signal" or "broadcast" by which
	 * deadline its observed delay is closer to. */
	for (i = 0; i < NUM_THREADS; ++i) {
		const struct timeval *target_delay;
		struct timeval target_time, actual_delay;
		if (alerted[i].timed_out) {
			TT_BLATHER(("%d looks like a timeout\n", i));
			target_delay = &tv_timeout;
			tt_assert(i == 5 || i == 6);
		} else if (evutil_timerisset(&alerted[i].alerted_at)) {
			long diff1,diff2;
			evutil_timersub(&alerted[i].alerted_at,
			    &launched_at, &actual_delay);
			diff1 = timeval_msec_diff(&actual_delay,
			    &tv_signal);
			diff2 = timeval_msec_diff(&actual_delay,
			    &tv_broadcast);
			if (labs(diff1) < labs(diff2)) {
				TT_BLATHER(("%d looks like a signal\n", i));
				target_delay = &tv_signal;
				++n_signal;
			} else {
				TT_BLATHER(("%d looks like a broadcast\n", i));
				target_delay = &tv_broadcast;
				++n_broadcast;
			}
		} else {
			TT_FAIL(("Thread %d never got woken", i));
			continue;
		}
		evutil_timeradd(target_delay, &launched_at, &target_time);
		test_timeval_diff_leq(&target_time, &alerted[i].alerted_at,
		    0, 200);
	}
	tt_int_op(n_broadcast + n_signal + n_timed_out, ==, NUM_THREADS);
	tt_int_op(n_signal, ==, 1);

end:
	EVTHREAD_FREE_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_FREE_COND(cond.cond);
}
#endif
395 
#define CB_COUNT 128
#define QUEUE_THREAD_COUNT 8

/* Block the calling thread for roughly 'ms' milliseconds. */
static void
SLEEP_MS(int ms)
{
	struct timeval delay = { ms / 1000, (ms % 1000) * 1000 };

	evutil_usleep_(&delay);
}
407 
/* Per-load-thread state for the deferred-callback skew test: a batch of
 * callbacks and the base ("queue") they are scheduled on. */
struct deferred_test_data {
	struct event_callback cbs[CB_COUNT];
	struct event_base *queue;
};

/* Wall-clock bracket around the 1 s reference timer. */
static struct timeval timer_start = {0,0};
static struct timeval timer_end = {0,0};
/* How many deferred callbacks actually ran. */
static unsigned callback_count = 0;
/* The threads that flood the base with deferred callbacks. */
static THREAD_T load_threads[QUEUE_THREAD_COUNT];
static struct deferred_test_data deferred_data[QUEUE_THREAD_COUNT];
418 
/* Deferred callback: burn ~1 ms and count the invocation.  The unlocked
 * increment appears to rely on deferred callbacks all running in the
 * base's loop thread — NOTE(review): confirm against the dispatch model. */
static void
deferred_callback(struct event_callback *cb, void *arg)
{
	SLEEP_MS(1);
	callback_count += 1;
}
425 
/* Load-thread body: schedule CB_COUNT deferred callbacks on the shared
 * base, pacing them ~1 ms apart, to create background pressure while the
 * reference timer runs. */
static THREAD_FN
load_deferred_queue(void *arg)
{
	struct deferred_test_data *data = arg;
	size_t i;

	for (i = 0; i < CB_COUNT; ++i) {
		event_deferred_cb_init_(&data->cbs[i], 0, deferred_callback,
		    NULL);
		event_deferred_cb_schedule_(data->queue, &data->cbs[i]);
		SLEEP_MS(1);
	}

	THREAD_RETURN();
}
441 
/* Fires when the 1 s reference timer expires; records the actual time so
 * the test can measure how much the load skewed it. */
static void
timer_callback(evutil_socket_t fd, short what, void *arg)
{
	evutil_gettimeofday(&timer_end, NULL);
}
447 
/* Immediate-timeout callback: launch the load threads from inside the
 * event loop, so they start only once dispatching is under way. */
static void
start_threads_callback(evutil_socket_t fd, short what, void *arg)
{
	int i;

	for (i = 0; i < QUEUE_THREAD_COUNT; ++i) {
		THREAD_START(load_threads[i], load_deferred_queue,
				&deferred_data[i]);
	}
}
458 
459 static void
460 thread_deferred_cb_skew(void *arg)
461 {
462 	struct timeval tv_timer = {1, 0};
463 	struct event_base *base = NULL;
464 	struct event_config *cfg = NULL;
465 	struct timeval elapsed;
466 	int elapsed_usec;
467 	int i;
468 
469 	cfg = event_config_new();
470 	tt_assert(cfg);
471 	event_config_set_max_dispatch_interval(cfg, NULL, 16, 0);
472 
473 	base = event_base_new_with_config(cfg);
474 	tt_assert(base);
475 
476 	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
477 		deferred_data[i].queue = base;
478 
479 	evutil_gettimeofday(&timer_start, NULL);
480 	event_base_once(base, -1, EV_TIMEOUT, timer_callback, NULL,
481 			&tv_timer);
482 	event_base_once(base, -1, EV_TIMEOUT, start_threads_callback,
483 			NULL, NULL);
484 	event_base_dispatch(base);
485 
486 	evutil_timersub(&timer_end, &timer_start, &elapsed);
487 	TT_BLATHER(("callback count, %u", callback_count));
488 	elapsed_usec =
489 	    (unsigned)(elapsed.tv_sec*1000000 + elapsed.tv_usec);
490 	TT_BLATHER(("elapsed time, %u usec", elapsed_usec));
491 
492 	/* XXX be more intelligent here.  just make sure skew is
493 	 * within .4 seconds for now. */
494 	tt_assert(elapsed_usec >= 600000 && elapsed_usec <= 1400000);
495 
496 end:
497 	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
498 		THREAD_JOIN(load_threads[i]);
499 	if (base)
500 		event_base_free(base);
501 	if (cfg)
502 		event_config_free(cfg);
503 }
504 
/* Five events whose activation times are recorded into 'times'; the fifth
 * one also breaks out of the loop on 'exit_base'. */
static struct event time_events[5];
static struct timeval times[5];
static struct event_base *exit_base = NULL;
/* Record the current time into the timeval passed as 'arg'; break the
 * loop when the last slot (&times[4]) is filled. */
static void
note_time_cb(evutil_socket_t fd, short what, void *arg)
{
	evutil_gettimeofday(arg, NULL);
	if (arg == &times[4]) {
		event_base_loopbreak(exit_base);
	}
}
/* Helper thread for thread_no_events: from outside the loop thread,
 * activate/add the five time_events on a fixed schedule (~100, ~200,
 * ~400, ~450 and ~500 ms after start), exercising cross-thread
 * event_active/event_add against a loop with no events of its own. */
static THREAD_FN
register_events_subthread(void *arg)
{
	struct timeval tv = {0,0};
	SLEEP_MS(100);
	event_active(&time_events[0], EV_TIMEOUT, 1);
	SLEEP_MS(100);
	event_active(&time_events[1], EV_TIMEOUT, 1);
	SLEEP_MS(100);
	tv.tv_usec = 100*1000;
	event_add(&time_events[2], &tv);
	tv.tv_usec = 150*1000;
	event_add(&time_events[3], &tv);
	SLEEP_MS(200);
	event_active(&time_events[4], EV_TIMEOUT, 1);

	THREAD_RETURN();
}
534 
/* Test body for "no_events": run the loop with EVLOOP_NO_EXIT_ON_EMPTY
 * while a helper thread injects events from outside, and verify each
 * callback fired at the expected offset and that loopbreak ended the
 * loop at ~500 ms. */
static void
thread_no_events(void *arg)
{
	THREAD_T thread;
	struct basic_test_data *data = arg;
	struct timeval starttime, endtime;
	int i;
	exit_base = data->base;

	memset(times,0,sizeof(times));
	for (i=0;i<5;++i) {
		/* fd -1, no flags: only ever triggered via event_active or
		 * a cross-thread event_add from the helper. */
		event_assign(&time_events[i], data->base,
		    -1, 0, note_time_cb, &times[i]);
	}

	evutil_gettimeofday(&starttime, NULL);
	THREAD_START(thread, register_events_subthread, data->base);
	event_base_loop(data->base, EVLOOP_NO_EXIT_ON_EMPTY);
	evutil_gettimeofday(&endtime, NULL);
	tt_assert(event_base_got_break(data->base));
	THREAD_JOIN(thread);
	for (i=0; i<5; ++i) {
		struct timeval diff;
		double sec;
		evutil_timersub(&times[i], &starttime, &diff);
		sec = diff.tv_sec + diff.tv_usec/1.0e6;
		TT_BLATHER(("event %d at %.4f seconds", i, sec));
	}
	/* Expected offsets (ms) per the helper thread's schedule. */
	test_timeval_diff_eq(&starttime, &times[0], 100);
	test_timeval_diff_eq(&starttime, &times[1], 200);
	test_timeval_diff_eq(&starttime, &times[2], 400);
	test_timeval_diff_eq(&starttime, &times[3], 450);
	test_timeval_diff_eq(&starttime, &times[4], 500);
	test_timeval_diff_eq(&starttime, &endtime,  500);

end:
	;
}
573 
/* Shorthand for a forked, threads+base test entry named thread_<name>. */
#define TEST(name)							\
	{ #name, thread_##name, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,	\
	  &basic_setup, NULL }

/* Test table consumed by the tinytest driver (see regress.h). */
struct testcase_t thread_testcases[] = {
	{ "basic", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
	  &basic_setup, NULL },
#ifndef _WIN32
	/* Same body as "basic"; setup_data selects the forking path. */
	{ "forking", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
	  &basic_setup, __UNCONST("forking") },
#endif
#if 0
	TEST(conditions_simple),
#endif
	{ "deferred_cb_skew", thread_deferred_cb_skew,
	  TT_FORK|TT_NEED_THREADS|TT_OFF_BY_DEFAULT,
	  &basic_setup, NULL },
#ifndef _WIN32
	/****** XXX TODO FIXME windows seems to be having some timing trouble,
	 * looking into it now. / ellzey
	 ******/
	TEST(no_events),
#endif
	END_OF_TESTCASES
};
599 
600