/*	$NetBSD: regress_thread.c,v 1.6 2021/04/07 03:36:48 christos Exp $	*/

/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "util-internal.h"

/* The old tests here need assertions to work. */
#undef NDEBUG

#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: regress_thread.c,v 1.6 2021/04/07 03:36:48 christos Exp $");

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef EVENT__HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

#ifdef EVENT__HAVE_PTHREADS
#include <pthread.h>
#elif defined(_WIN32)
#include <process.h>
#endif
#include <assert.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <time.h>

#include "sys/queue.h"

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "evthread-internal.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "regress.h"
#include "tinytest_macros.h"
#include "time-internal.h"
#include "regress_thread.h"

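/*
 * A lock/condition-variable pair shared between a waiting thread and the
 * timer callbacks below: the timer takes the lock and broadcasts (or, in
 * the disabled test, signals) the condition to wake the waiter.
 */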
struct cond_wait {
	void *lock;
	void *cond;
};

static void
wake_all_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct cond_wait *cw = arg;
	EVLOCK_LOCK(cw->lock, 0);
	EVTHREAD_COND_BROADCAST(cw->cond);
	EVLOCK_UNLOCK(cw->lock, 0);
}

#if 0
static void
wake_one_timeout(evutil_socket_t fd, short what, void *arg)
{
	struct cond_wait *cw = arg;
	EVLOCK_LOCK(cw->lock, 0);
	EVTHREAD_COND_SIGNAL(cw->cond);
	EVLOCK_UNLOCK(cw->lock, 0);
}
#endif

#define NUM_THREADS	100
#define NUM_ITERATIONS	100
void *count_lock;
static int count;

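/*
 * Body of each worker in the "basic"/"forking" tests: NUM_ITERATIONS times,
 * arm a 3 msec timer on the shared base while holding the lock, wait for
 * the wake_all_timeout broadcast, and bump the shared counter.  The last
 * worker to finish asks the base to exit its loop.
 */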
static THREAD_FN
basic_thread(void *arg)
{
	struct cond_wait cw;
	struct event_base *base = arg;
	struct event ev;
	int i = 0;

	EVTHREAD_ALLOC_LOCK(cw.lock, 0);
	EVTHREAD_ALLOC_COND(cw.cond);
	assert(cw.lock);
	assert(cw.cond);

	evtimer_assign(&ev, base, wake_all_timeout, &cw);
	for (i = 0; i < NUM_ITERATIONS; i++) {
		struct timeval tv;
		evutil_timerclear(&tv);
		tv.tv_sec = 0;
		tv.tv_usec = 3000;

		EVLOCK_LOCK(cw.lock, 0);
		/* We need to make sure the event does not fire before we
		 * get to wait on the condition variable. */
		assert(evtimer_add(&ev, &tv) == 0);

		assert(EVTHREAD_COND_WAIT(cw.cond, cw.lock) == 0);
		EVLOCK_UNLOCK(cw.lock, 0);

		EVLOCK_LOCK(count_lock, 0);
		++count;
		EVLOCK_UNLOCK(count_lock, 0);
	}

	/* Exit the loop only once all threads have fired all timeouts. */
	EVLOCK_LOCK(count_lock, 0);
	if (count >= NUM_THREADS * NUM_ITERATIONS)
		event_base_loopexit(base, NULL);
	EVLOCK_UNLOCK(count_lock, 0);

	EVTHREAD_FREE_LOCK(cw.lock, 0);
	EVTHREAD_FREE_COND(cw.cond);

	THREAD_RETURN();
}

static int notification_fd_used = 0;
#ifndef _WIN32
static int got_sigchld = 0;
static void
sigchld_cb(evutil_socket_t fd, short event, void *arg)
{
	struct timeval tv;
	struct event_base *base = arg;

	got_sigchld++;
	tv.tv_usec = 100000;
	tv.tv_sec = 0;
	event_base_loopexit(base, &tv);
}

static void
notify_fd_cb(evutil_socket_t fd, short event, void *arg)
{
	++notification_fd_used;
}
#endif

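/*
 * The "basic" test starts NUM_THREADS copies of basic_thread() against one
 * shared event_base and checks that every wakeup was counted
 * (count == NUM_THREADS * NUM_ITERATIONS).  In the "forking" variant the
 * parent additionally watches SIGCHLD and (peeking at libevent internals)
 * the base's th_notify_fd; the child re-inits the base and runs the same
 * threaded test, and the parent then checks that it saw SIGCHLD and that
 * its notification fd was never poked.
 */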
static void
thread_basic(void *arg)
{
	THREAD_T threads[NUM_THREADS];
	struct event ev;
	struct timeval tv;
	int i;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;

	struct event *notification_event = NULL;
	struct event *sigchld_event = NULL;

	EVTHREAD_ALLOC_LOCK(count_lock, 0);
	tt_assert(count_lock);

	tt_assert(base);
	if (evthread_make_base_notifiable(base) < 0) {
		tt_abort_msg("Couldn't make base notifiable!");
	}

#ifndef _WIN32
	if (data->setup_data && !strcmp(data->setup_data, "forking")) {
		pid_t pid;
		int status;
		sigchld_event = evsignal_new(base, SIGCHLD, sigchld_cb, base);
		/* This piggybacks on the th_notify_fd weirdly, and looks
		 * inside libevent internals.  Not a good idea in non-testing
		 * code! */
		notification_event = event_new(base,
		    base->th_notify_fd[0], EV_READ|EV_PERSIST, notify_fd_cb,
		    NULL);
		event_add(sigchld_event, NULL);
		event_add(notification_event, NULL);

		if ((pid = fork()) == 0) {
			event_del(notification_event);
			if (event_reinit(base) < 0) {
				TT_FAIL(("reinit"));
				exit(1);
			}
			event_assign(notification_event, base,
			    base->th_notify_fd[0], EV_READ|EV_PERSIST,
			    notify_fd_cb, NULL);
			event_add(notification_event, NULL);
			goto child;
		}

		event_base_dispatch(base);

		if (waitpid(pid, &status, 0) == -1)
			tt_abort_perror("waitpid");
		TT_BLATHER(("Waitpid okay\n"));

		tt_assert(got_sigchld);
		tt_int_op(notification_fd_used, ==, 0);

		goto end;
	}

child:
#endif
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_START(threads[i], basic_thread, base);

	/* A long dummy timeout keeps the loop running until the last
	 * worker thread calls event_base_loopexit(). */
	evtimer_assign(&ev, base, NULL, NULL);
	evutil_timerclear(&tv);
	tv.tv_sec = 1000;
	event_add(&ev, &tv);

	event_base_dispatch(base);

	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	event_del(&ev);

	tt_int_op(count, ==, NUM_THREADS * NUM_ITERATIONS);

	EVTHREAD_FREE_LOCK(count_lock, 0);

	TT_BLATHER(("notifications==%d", notification_fd_used));

end:
	if (notification_event)
		event_free(notification_event);
	if (sigchld_event)
		event_free(sigchld_event);
}

#undef NUM_THREADS
#define NUM_THREADS	10

struct alerted_record {
	struct cond_wait *cond;
	struct timeval delay;
	struct timeval alerted_at;
	int timed_out;
};

#if 0
static THREAD_FN
wait_for_condition(void *arg)
{
	struct alerted_record *rec = arg;
	int r;

	EVLOCK_LOCK(rec->cond->lock, 0);
	if (rec->delay.tv_sec || rec->delay.tv_usec) {
		r = EVTHREAD_COND_WAIT_TIMED(rec->cond->cond, rec->cond->lock,
		    &rec->delay);
	} else {
		r = EVTHREAD_COND_WAIT(rec->cond->cond, rec->cond->lock);
	}
	EVLOCK_UNLOCK(rec->cond->lock, 0);

	evutil_gettimeofday(&rec->alerted_at, NULL);
	if (r == 1)
		rec->timed_out = 1;

	THREAD_RETURN();
}

static void
thread_conditions_simple(void *arg)
{
	struct timeval tv_signal, tv_timeout, tv_broadcast;
	struct alerted_record alerted[NUM_THREADS];
	THREAD_T threads[NUM_THREADS];
	struct cond_wait cond;
	int i;
	struct timeval launched_at;
	struct event wake_one;
	struct event wake_all;
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	int n_timed_out = 0, n_signal = 0, n_broadcast = 0;

	tv_signal.tv_sec = tv_timeout.tv_sec = tv_broadcast.tv_sec = 0;
	tv_signal.tv_usec = 30*1000;
	tv_timeout.tv_usec = 150*1000;
	tv_broadcast.tv_usec = 500*1000;

	EVTHREAD_ALLOC_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_ALLOC_COND(cond.cond);
	tt_assert(cond.lock);
	tt_assert(cond.cond);
	for (i = 0; i < NUM_THREADS; ++i) {
		memset(&alerted[i], 0, sizeof(struct alerted_record));
		alerted[i].cond = &cond;
	}

	/* Threads 5 and 6 will be allowed to time out */
	memcpy(&alerted[5].delay, &tv_timeout, sizeof(tv_timeout));
	memcpy(&alerted[6].delay, &tv_timeout, sizeof(tv_timeout));

	evtimer_assign(&wake_one, base, wake_one_timeout, &cond);
	evtimer_assign(&wake_all, base, wake_all_timeout, &cond);

	evutil_gettimeofday(&launched_at, NULL);

	/* Launch the threads... */
	for (i = 0; i < NUM_THREADS; ++i) {
		THREAD_START(threads[i], wait_for_condition, &alerted[i]);
	}

	/* Start the timers... */
	tt_int_op(event_add(&wake_one, &tv_signal), ==, 0);
	tt_int_op(event_add(&wake_all, &tv_broadcast), ==, 0);

	/* And run for a bit... */
	event_base_dispatch(base);

	/* And wait till the threads are done. */
	for (i = 0; i < NUM_THREADS; ++i)
		THREAD_JOIN(threads[i]);

	/* Now, let's see what happened.  At least one of 5 or 6 should
	 * have timed out. */
	n_timed_out = alerted[5].timed_out + alerted[6].timed_out;
	tt_int_op(n_timed_out, >=, 1);
	tt_int_op(n_timed_out, <=, 2);

	for (i = 0; i < NUM_THREADS; ++i) {
		const struct timeval *target_delay;
		struct timeval target_time, actual_delay;
		if (alerted[i].timed_out) {
			TT_BLATHER(("%d looks like a timeout\n", i));
			target_delay = &tv_timeout;
			tt_assert(i == 5 || i == 6);
		} else if (evutil_timerisset(&alerted[i].alerted_at)) {
			long diff1, diff2;
			evutil_timersub(&alerted[i].alerted_at,
			    &launched_at, &actual_delay);
			diff1 = timeval_msec_diff(&actual_delay,
			    &tv_signal);
			diff2 = timeval_msec_diff(&actual_delay,
			    &tv_broadcast);
			if (labs(diff1) < labs(diff2)) {
				TT_BLATHER(("%d looks like a signal\n", i));
				target_delay = &tv_signal;
				++n_signal;
			} else {
				TT_BLATHER(("%d looks like a broadcast\n", i));
				target_delay = &tv_broadcast;
				++n_broadcast;
			}
		} else {
			TT_FAIL(("Thread %d never got woken", i));
			continue;
		}
		evutil_timeradd(target_delay, &launched_at, &target_time);
		test_timeval_diff_leq(&target_time, &alerted[i].alerted_at,
		    0, 200);
	}
	tt_int_op(n_broadcast + n_signal + n_timed_out, ==, NUM_THREADS);
	tt_int_op(n_signal, ==, 1);

end:
	EVTHREAD_FREE_LOCK(cond.lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	EVTHREAD_FREE_COND(cond.cond);
}
#endif


#define CB_COUNT		128
#define QUEUE_THREAD_COUNT	8

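/* Sleep for roughly the given number of milliseconds, via evutil_usleep_(). */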
static void
SLEEP_MS(int ms)
{
	struct timeval tv;
	tv.tv_sec = ms/1000;
	tv.tv_usec = (ms%1000)*1000;
	evutil_usleep_(&tv);
}

struct deferred_test_data {
	struct event_callback cbs[CB_COUNT];
	struct event_base *queue;
};

static struct timeval timer_start = {0,0};
static struct timeval timer_end = {0,0};
static unsigned callback_count = 0;
static THREAD_T load_threads[QUEUE_THREAD_COUNT];
static struct deferred_test_data deferred_data[QUEUE_THREAD_COUNT];

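/*
 * Load generators for the deferred_cb_skew test: each of QUEUE_THREAD_COUNT
 * threads schedules CB_COUNT deferred callbacks (each of which sleeps about
 * 1 msec) onto the shared base, pausing about 1 msec between schedules.
 */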
static void
deferred_callback(struct event_callback *cb, void *arg)
{
	SLEEP_MS(1);
	callback_count += 1;
}

static THREAD_FN
load_deferred_queue(void *arg)
{
	struct deferred_test_data *data = arg;
	size_t i;

	for (i = 0; i < CB_COUNT; ++i) {
		event_deferred_cb_init_(&data->cbs[i], 0, deferred_callback,
		    NULL);
		event_deferred_cb_schedule_(data->queue, &data->cbs[i]);
		SLEEP_MS(1);
	}

	THREAD_RETURN();
}

static void
timer_callback(evutil_socket_t fd, short what, void *arg)
{
	evutil_gettimeofday(&timer_end, NULL);
}

static void
start_threads_callback(evutil_socket_t fd, short what, void *arg)
{
	int i;

	for (i = 0; i < QUEUE_THREAD_COUNT; ++i) {
		THREAD_START(load_threads[i], load_deferred_queue,
		    &deferred_data[i]);
	}
}

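/*
 * deferred_cb_skew: run a 1-second timer on a base whose dispatch interval
 * is capped at 16 callbacks per pass, while the load threads flood it with
 * deferred callbacks.  The timer should still fire within roughly +/- 0.4
 * seconds of its deadline.
 */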
static void
thread_deferred_cb_skew(void *arg)
{
	struct timeval tv_timer = {1, 0};
	struct event_base *base = NULL;
	struct event_config *cfg = NULL;
	struct timeval elapsed;
	int elapsed_usec;
	int i;

	cfg = event_config_new();
	tt_assert(cfg);
	event_config_set_max_dispatch_interval(cfg, NULL, 16, 0);

	base = event_base_new_with_config(cfg);
	tt_assert(base);

	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
		deferred_data[i].queue = base;

	evutil_gettimeofday(&timer_start, NULL);
	event_base_once(base, -1, EV_TIMEOUT, timer_callback, NULL,
	    &tv_timer);
	event_base_once(base, -1, EV_TIMEOUT, start_threads_callback,
	    NULL, NULL);
	event_base_dispatch(base);

	evutil_timersub(&timer_end, &timer_start, &elapsed);
	TT_BLATHER(("callback count, %u", callback_count));
	elapsed_usec =
	    (unsigned)(elapsed.tv_sec*1000000 + elapsed.tv_usec);
	TT_BLATHER(("elapsed time, %u usec", elapsed_usec));

	/* XXX be more intelligent here. just make sure skew is
	 * within .4 seconds for now. */
	tt_assert(elapsed_usec >= 600000 && elapsed_usec <= 1400000);

end:
	for (i = 0; i < QUEUE_THREAD_COUNT; ++i)
		THREAD_JOIN(load_threads[i]);
	if (base)
		event_base_free(base);
	if (cfg)
		event_config_free(cfg);
}

static struct event time_events[5];
static struct timeval times[5];
static struct event_base *exit_base = NULL;
static void
note_time_cb(evutil_socket_t fd, short what, void *arg)
{
	evutil_gettimeofday(arg, NULL);
	if (arg == &times[4]) {
		event_base_loopbreak(exit_base);
	}
}
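
/*
 * Helper thread for the no_events test: from outside the event loop,
 * activate times[0] and times[1] at ~100 and ~200 msec, add timers for
 * times[2] and times[3] at ~300 msec (firing ~100 and ~150 msec later),
 * and finally activate times[4] at ~500 msec, which breaks the main loop.
 */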
static THREAD_FN
register_events_subthread(void *arg)
{
	struct timeval tv = {0,0};
	SLEEP_MS(100);
	event_active(&time_events[0], EV_TIMEOUT, 1);
	SLEEP_MS(100);
	event_active(&time_events[1], EV_TIMEOUT, 1);
	SLEEP_MS(100);
	tv.tv_usec = 100*1000;
	event_add(&time_events[2], &tv);
	tv.tv_usec = 150*1000;
	event_add(&time_events[3], &tv);
	SLEEP_MS(200);
	event_active(&time_events[4], EV_TIMEOUT, 1);

	THREAD_RETURN();
}

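/*
 * no_events: the main thread runs event_base_loop() with
 * EVLOOP_NO_EXIT_ON_EMPTY while a subthread feeds it events from outside;
 * the test then checks that each event's callback ran at about the expected
 * offset from the start time.
 */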
static void
thread_no_events(void *arg)
{
	THREAD_T thread;
	struct basic_test_data *data = arg;
	struct timeval starttime, endtime;
	int i;
	exit_base = data->base;

	memset(times, 0, sizeof(times));
	for (i = 0; i < 5; ++i) {
		event_assign(&time_events[i], data->base,
		    -1, 0, note_time_cb, &times[i]);
	}

	evutil_gettimeofday(&starttime, NULL);
	THREAD_START(thread, register_events_subthread, data->base);
	event_base_loop(data->base, EVLOOP_NO_EXIT_ON_EMPTY);
	evutil_gettimeofday(&endtime, NULL);
	tt_assert(event_base_got_break(data->base));
	THREAD_JOIN(thread);
	for (i = 0; i < 5; ++i) {
		struct timeval diff;
		double sec;
		evutil_timersub(&times[i], &starttime, &diff);
		sec = diff.tv_sec + diff.tv_usec/1.0e6;
		TT_BLATHER(("event %d at %.4f seconds", i, sec));
	}
	test_timeval_diff_eq(&starttime, &times[0], 100);
	test_timeval_diff_eq(&starttime, &times[1], 200);
	test_timeval_diff_eq(&starttime, &times[2], 400);
	test_timeval_diff_eq(&starttime, &times[3], 450);
	test_timeval_diff_eq(&starttime, &times[4], 500);
	test_timeval_diff_eq(&starttime, &endtime, 500);

end:
	;
}

#define TEST(name, f) \
	{ #name, thread_##name, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE|(f), \
	  &basic_setup, NULL }

struct testcase_t thread_testcases[] = {
	{ "basic", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
	  &basic_setup, NULL },
#ifndef _WIN32
	{ "forking", thread_basic, TT_FORK|TT_NEED_THREADS|TT_NEED_BASE,
	  &basic_setup, __UNCONST("forking") },
#endif
#if 0
	TEST(conditions_simple, TT_RETRIABLE),
#endif
	{ "deferred_cb_skew", thread_deferred_cb_skew,
	  TT_FORK|TT_NEED_THREADS|TT_OFF_BY_DEFAULT,
	  &basic_setup, NULL },
#ifndef _WIN32
	/****** XXX TODO FIXME windows seems to be having some timing trouble,
	 * looking into it now. / ellzey
	 ******/
	TEST(no_events, TT_RETRIABLE),
#endif
	END_OF_TESTCASES
};