1 /* $NetBSD: regress.c,v 1.11 2021/04/10 19:02:37 rillig Exp $ */
2
3 /*
4 * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
5 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29 #include "util-internal.h"
30
31 #ifdef _WIN32
32 #include <winsock2.h>
33 #include <windows.h>
34 #endif
35
36 #include "event2/event-config.h"
37 #include <sys/cdefs.h>
38 __RCSID("$NetBSD: regress.c,v 1.11 2021/04/10 19:02:37 rillig Exp $");
39
40 #include <sys/types.h>
41 #include <sys/stat.h>
42 #ifdef EVENT__HAVE_SYS_TIME_H
43 #include <sys/time.h>
44 #endif
45 #include <sys/queue.h>
46 #ifndef _WIN32
47 #include <sys/socket.h>
48 #include <sys/wait.h>
49 #include <limits.h>
50 #include <signal.h>
51 #include <unistd.h>
52 #include <netdb.h>
53 #endif
54 #include <fcntl.h>
55 #include <signal.h>
56 #include <stdlib.h>
57 #include <stdio.h>
58 #include <string.h>
59 #include <errno.h>
60 #include <assert.h>
61 #include <ctype.h>
62
63 #include "event2/event.h"
64 #include "event2/event_struct.h"
65 #include "event2/event_compat.h"
66 #include "event2/tag.h"
67 #include "event2/buffer.h"
68 #include "event2/buffer_compat.h"
69 #include "event2/util.h"
70 #include "event-internal.h"
71 #include "evthread-internal.h"
72 #include "log-internal.h"
73 #include "time-internal.h"
74
75 #include "regress.h"
76 #include "regress_thread.h"
77
78 #ifndef _WIN32
79 #include "regress.gen.h"
80 #endif
81
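/* Shared state for the legacy tests below: the socketpair under test,
 * pass/fail flags, and the buffers/offsets and timestamps used by the
 * multiple read/write and timeout tests. */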
82 evutil_socket_t pair[2];
83 int test_ok;
84 int called;
85 struct event_base *global_base;
86
87 static char wbuf[4096];
88 static char rbuf[4096];
89 static int woff;
90 static int roff;
91 static int usepersist;
92 static struct timeval tset;
93 static struct timeval tcalled;
94
95
96 #define TEST1 "this is a test"
97
98 #ifdef _WIN32
99 #define write(fd,buf,len) send((fd),(buf),(int)(len),0)
100 #define read(fd,buf,len) recv((fd),(buf),(int)(len),0)
101 #endif
102
103 struct basic_cb_args
104 {
105 struct event_base *eb;
106 struct event *ev;
107 unsigned int callcount;
108 };
109
110 static void
111 simple_read_cb(evutil_socket_t fd, short event, void *arg)
112 {
113 char buf[256];
114 int len;
115
116 len = read(fd, buf, sizeof(buf));
117
118 if (len) {
119 if (!called) {
120 if (event_add(arg, NULL) == -1)
121 exit(1);
122 }
123 } else if (called == 1)
124 test_ok = 1;
125
126 called++;
127 }
128
129 static void
130 basic_read_cb(evutil_socket_t fd, short event, void *data)
131 {
132 char buf[256];
133 int len;
134 struct basic_cb_args *arg = data;
135
136 len = read(fd, buf, sizeof(buf));
137
138 if (len < 0) {
139 tt_fail_perror("read (callback)");
140 } else {
141 switch (arg->callcount++) {
142 case 0: /* first call: expect to read data; cycle */
143 if (len > 0)
144 return;
145
146 tt_fail_msg("EOF before data read");
147 break;
148
149 case 1: /* second call: expect EOF; stop */
150 if (len > 0)
151 tt_fail_msg("not all data read on first cycle");
152 break;
153
154 default: /* third call: should not happen */
155 tt_fail_msg("too many cycles");
156 }
157 }
158
159 event_del(arg->ev);
160 event_base_loopexit(arg->eb, NULL);
161 }
162
163 static void
164 dummy_read_cb(evutil_socket_t fd, short event, void *arg)
165 {
166 }
167
168 static void
169 simple_write_cb(evutil_socket_t fd, short event, void *arg)
170 {
171 int len;
172
173 len = write(fd, TEST1, strlen(TEST1) + 1);
174 if (len == -1)
175 test_ok = 0;
176 else
177 test_ok = 1;
178 }
179
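/* Write wbuf to fd in chunks of up to 128 bytes; once everything has been
 * written, shut down the write side.  Non-persistent events re-add
 * themselves after each chunk. */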
180 static void
181 multiple_write_cb(evutil_socket_t fd, short event, void *arg)
182 {
183 struct event *ev = arg;
184 int len;
185
186 len = 128;
187 if (woff + len >= (int)sizeof(wbuf))
188 len = sizeof(wbuf) - woff;
189
190 len = write(fd, wbuf + woff, len);
191 if (len == -1) {
192 fprintf(stderr, "%s: write\n", __func__);
193 if (usepersist)
194 event_del(ev);
195 return;
196 }
197
198 woff += len;
199
200 if (woff >= (int)sizeof(wbuf)) {
201 shutdown(fd, EVUTIL_SHUT_WR);
202 if (usepersist)
203 event_del(ev);
204 return;
205 }
206
207 if (!usepersist) {
208 if (event_add(ev, NULL) == -1)
209 exit(1);
210 }
211 }
212
213 static void
214 multiple_read_cb(evutil_socket_t fd, short event, void *arg)
215 {
216 struct event *ev = arg;
217 int len;
218
219 len = read(fd, rbuf + roff, sizeof(rbuf) - roff);
220 if (len == -1)
221 fprintf(stderr, "%s: read\n", __func__);
222 if (len <= 0) {
223 if (usepersist)
224 event_del(ev);
225 return;
226 }
227
228 roff += len;
229 if (!usepersist) {
230 if (event_add(ev, NULL) == -1)
231 exit(1);
232 }
233 }
234
235 static void
236 timeout_cb(evutil_socket_t fd, short event, void *arg)
237 {
238 evutil_gettimeofday(&tcalled, NULL);
239 }
240
241 struct both {
242 struct event ev;
243 int nread;
244 };
245
246 static void
247 combined_read_cb(evutil_socket_t fd, short event, void *arg)
248 {
249 struct both *both = arg;
250 char buf[128];
251 int len;
252
253 len = read(fd, buf, sizeof(buf));
254 if (len == -1)
255 fprintf(stderr, "%s: read\n", __func__);
256 if (len <= 0)
257 return;
258
259 both->nread += len;
260 if (event_add(&both->ev, NULL) == -1)
261 exit(1);
262 }
263
264 static void
265 combined_write_cb(evutil_socket_t fd, short event, void *arg)
266 {
267 struct both *both = arg;
268 char buf[128];
269 int len;
270
271 len = sizeof(buf);
272 if (len > both->nread)
273 len = both->nread;
274
275 memset(buf, 'q', len);
276
277 len = write(fd, buf, len);
278 if (len == -1)
279 fprintf(stderr, "%s: write\n", __func__);
280 if (len <= 0) {
281 shutdown(fd, EVUTIL_SHUT_WR);
282 return;
283 }
284
285 both->nread -= len;
286 if (event_add(&both->ev, NULL) == -1)
287 exit(1);
288 }
289
290 /* These macros are used to replicate the work of the legacy test wrapper code */
291 #define setup_test(x) do { \
292 if (!in_legacy_test_wrapper) { \
293 TT_FAIL(("Legacy test %s not wrapped properly", x)); \
294 return; \
295 } \
296 } while (0)
297 #define cleanup_test() setup_test("cleanup")
298
299 static void
300 test_simpleread(void)
301 {
302 struct event ev;
303
304 /* Very simple read test */
305 setup_test("Simple read: ");
306
307 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
308 tt_fail_perror("write");
309 }
310
311 shutdown(pair[0], EVUTIL_SHUT_WR);
312
313 event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
314 if (event_add(&ev, NULL) == -1)
315 exit(1);
316 event_dispatch();
317
318 cleanup_test();
319 }
320
321 static void
322 test_simplewrite(void)
323 {
324 struct event ev;
325
326 /* Very simple write test */
327 setup_test("Simple write: ");
328
329 event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev);
330 if (event_add(&ev, NULL) == -1)
331 exit(1);
332 event_dispatch();
333
334 cleanup_test();
335 }
336
337 static void
338 simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg)
339 {
340 if (++called == 2)
341 test_ok = 1;
342 }
343
344 static void
345 test_simpleread_multiple(void)
346 {
347 struct event one, two;
348
349 /* Very simple read test */
350 setup_test("Simple read to multiple evens: ");
351
352 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
353 tt_fail_perror("write");
354 }
355
356 shutdown(pair[0], EVUTIL_SHUT_WR);
357
358 event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL);
359 if (event_add(&one, NULL) == -1)
360 exit(1);
361 event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL);
362 if (event_add(&two, NULL) == -1)
363 exit(1);
364 event_dispatch();
365
366 cleanup_test();
367 }
368
369 static int have_closed = 0;
370 static int premature_event = 0;
371 static void
372 simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr)
373 {
374 evutil_socket_t **fds = ptr;
375 TT_BLATHER(("Closing"));
376 evutil_closesocket(*fds[0]);
377 evutil_closesocket(*fds[1]);
378 *fds[0] = -1;
379 *fds[1] = -1;
380 have_closed = 1;
381 }
382
383 static void
384 record_event_cb(evutil_socket_t s, short what, void *ptr)
385 {
386 short *whatp = ptr;
387 if (!have_closed)
388 premature_event = 1;
389 *whatp = what;
390 TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s));
391 }
392
393 static void
394 test_simpleclose_rw(void *ptr)
395 {
396 /* Test that a close of FD is detected as a read and as a write. */
397 struct event_base *base = event_base_new();
398 evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1};
399 evutil_socket_t *to_close[2];
400 struct event *rev=NULL, *wev=NULL, *closeev=NULL;
401 struct timeval tv;
402 short got_read_on_close = 0, got_write_on_close = 0;
403 char buf[1024];
404 memset(buf, 99, sizeof(buf));
405 #ifdef _WIN32
406 #define LOCAL_SOCKETPAIR_AF AF_INET
407 #else
408 #define LOCAL_SOCKETPAIR_AF AF_UNIX
409 #endif
410 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0)
411 TT_DIE(("socketpair: %s", strerror(errno)));
412 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0)
413 TT_DIE(("socketpair: %s", strerror(errno)));
414 if (evutil_make_socket_nonblocking(pair1[1]) < 0)
415 TT_DIE(("make_socket_nonblocking"));
416 if (evutil_make_socket_nonblocking(pair2[1]) < 0)
417 TT_DIE(("make_socket_nonblocking"));
418
419 /** Stuff pair2[1] full of data, until write fails */
420 while (1) {
421 int r = write(pair2[1], buf, sizeof(buf));
422 if (r<0) {
423 int err = evutil_socket_geterror(pair2[1]);
424 if (! EVUTIL_ERR_RW_RETRIABLE(err))
425 TT_DIE(("write failed strangely: %s",
426 evutil_socket_error_to_string(err)));
427 break;
428 }
429 }
430 to_close[0] = &pair1[0];
431 to_close[1] = &pair2[0];
432
433 closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb,
434 to_close);
435 rev = event_new(base, pair1[1], EV_READ, record_event_cb,
436 &got_read_on_close);
437 TT_BLATHER(("Waiting for read on %d", (int)pair1[1]));
438 wev = event_new(base, pair2[1], EV_WRITE, record_event_cb,
439 &got_write_on_close);
440 TT_BLATHER(("Waiting for write on %d", (int)pair2[1]));
441 tv.tv_sec = 0;
442 tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make
443 * sure we get a read event. */
444 event_add(closeev, &tv);
445 event_add(rev, NULL);
446 event_add(wev, NULL);
447 /* Don't let the test go on too long. */
448 tv.tv_sec = 0;
449 tv.tv_usec = 200*1000;
450 event_base_loopexit(base, &tv);
451 event_base_loop(base, 0);
452
453 tt_int_op(got_read_on_close, ==, EV_READ);
454 tt_int_op(got_write_on_close, ==, EV_WRITE);
455 tt_int_op(premature_event, ==, 0);
456
457 end:
458 if (pair1[0] >= 0)
459 evutil_closesocket(pair1[0]);
460 if (pair1[1] >= 0)
461 evutil_closesocket(pair1[1]);
462 if (pair2[0] >= 0)
463 evutil_closesocket(pair2[0]);
464 if (pair2[1] >= 0)
465 evutil_closesocket(pair2[1]);
466 if (rev)
467 event_free(rev);
468 if (wev)
469 event_free(wev);
470 if (closeev)
471 event_free(closeev);
472 if (base)
473 event_base_free(base);
474 }
475
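/* EV_CLOSED test: data->setup_data selects the edge-triggered/persist
 * variants and whether the peer socket is closed or shut down. */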
476 static void
477 test_simpleclose(void *ptr)
478 {
479 struct basic_test_data *data = ptr;
480 struct event_base *base = data->base;
481 evutil_socket_t *xpair = data->pair;
482 const char *flags = (const char *)data->setup_data;
483 int et = !!strstr(flags, "ET");
484 int persist = !!strstr(flags, "persist");
485 short events = EV_CLOSED | (et ? EV_ET : 0) | (persist ? EV_PERSIST : 0);
486 struct event *ev = NULL;
487 short got_event;
488
489 if (!(event_base_get_features(data->base) & EV_FEATURE_EARLY_CLOSE))
490 tt_skip();
491
492 /* XXX: should this code be moved to regress_et.c? */
493 if (et && !(event_base_get_features(data->base) & EV_FEATURE_ET))
494 tt_skip();
495
496 ev = event_new(base, xpair[0], events, record_event_cb, &got_event);
497 tt_assert(ev);
498 tt_assert(!event_add(ev, NULL));
499
500 got_event = 0;
501 if (strstr(flags, "close")) {
502 tt_assert(!evutil_closesocket(xpair[1]));
503 /* avoid closing in setup routines */
504 xpair[1] = -1;
505 } else if (strstr(flags, "shutdown")) {
506 tt_assert(!shutdown(xpair[1], EVUTIL_SHUT_WR));
507 } else {
508 tt_abort_msg("unknown flags");
509 }
510
511 /* w/o edge-triggered but w/ persist it will not stop */
512 if (!et && persist) {
513 struct timeval tv;
514 tv.tv_sec = 0;
515 tv.tv_usec = 10000;
516 tt_assert(!event_base_loopexit(base, &tv));
517 }
518
519 tt_int_op(event_base_loop(base, EVLOOP_NONBLOCK), ==, !persist);
520 tt_int_op(got_event, ==, (events & ~EV_PERSIST));
521
522 end:
523 if (ev)
524 event_free(ev);
525 }
526
527 static void
528 test_multiple(void)
529 {
530 struct event ev, ev2;
531 int i;
532
533 /* Multiple read and write test */
534 setup_test("Multiple read/write: ");
535 memset(rbuf, 0, sizeof(rbuf));
536 for (i = 0; i < (int)sizeof(wbuf); i++)
537 wbuf[i] = i;
538
539 roff = woff = 0;
540 usepersist = 0;
541
542 event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev);
543 if (event_add(&ev, NULL) == -1)
544 exit(1);
545 event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2);
546 if (event_add(&ev2, NULL) == -1)
547 exit(1);
548 event_dispatch();
549
550 if (roff == woff)
551 test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
552
553 cleanup_test();
554 }
555
556 static void
557 test_persistent(void)
558 {
559 struct event ev, ev2;
560 int i;
561
562 /* Multiple read and write test with persist */
563 setup_test("Persist read/write: ");
564 memset(rbuf, 0, sizeof(rbuf));
565 for (i = 0; i < (int)sizeof(wbuf); i++)
566 wbuf[i] = i;
567
568 roff = woff = 0;
569 usepersist = 1;
570
571 event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev);
572 if (event_add(&ev, NULL) == -1)
573 exit(1);
574 event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2);
575 if (event_add(&ev2, NULL) == -1)
576 exit(1);
577 event_dispatch();
578
579 if (roff == woff)
580 test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
581
582 cleanup_test();
583 }
584
585 static void
586 test_combined(void)
587 {
588 struct both r1, r2, w1, w2;
589
590 setup_test("Combined read/write: ");
591 memset(&r1, 0, sizeof(r1));
592 memset(&r2, 0, sizeof(r2));
593 memset(&w1, 0, sizeof(w1));
594 memset(&w2, 0, sizeof(w2));
595
596 w1.nread = 4096;
597 w2.nread = 8192;
598
599 event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1);
600 event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1);
601 event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2);
602 event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2);
603 tt_assert(event_add(&r1.ev, NULL) != -1);
604 tt_assert(!event_add(&w1.ev, NULL));
605 tt_assert(!event_add(&r2.ev, NULL));
606 tt_assert(!event_add(&w2.ev, NULL));
607 event_dispatch();
608
609 if (r1.nread == 8192 && r2.nread == 4096)
610 test_ok = 1;
611
612 end:
613 cleanup_test();
614 }
615
616 static void
617 test_simpletimeout(void)
618 {
619 struct timeval tv;
620 struct event ev;
621
622 setup_test("Simple timeout: ");
623
624 tv.tv_usec = 200*1000;
625 tv.tv_sec = 0;
626 evutil_timerclear(&tcalled);
627 evtimer_set(&ev, timeout_cb, NULL);
628 evtimer_add(&ev, &tv);
629
630 evutil_gettimeofday(&tset, NULL);
631 event_dispatch();
632 test_timeval_diff_eq(&tset, &tcalled, 200);
633
634 test_ok = 1;
635 end:
636 cleanup_test();
637 }
638
639 static void
640 periodic_timeout_cb(evutil_socket_t fd, short event, void *arg)
641 {
642 int *count = arg;
643
644 (*count)++;
645 if (*count == 6) {
646 /* call loopexit only once - on slow machines(?), it is
647 * apparently possible for this to get called twice. */
648 test_ok = 1;
649 event_base_loopexit(global_base, NULL);
650 }
651 }
652
653 static void
654 test_persistent_timeout(void)
655 {
656 struct timeval tv;
657 struct event ev;
658 int count = 0;
659
660 evutil_timerclear(&tv);
661 tv.tv_usec = 10000;
662
663 event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST,
664 periodic_timeout_cb, &count);
665 event_add(&ev, &tv);
666
667 event_dispatch();
668
669 event_del(&ev);
670 }
671
672 static void
673 test_persistent_timeout_jump(void *ptr)
674 {
675 struct basic_test_data *data = ptr;
676 struct event ev;
677 int count = 0;
678 struct timeval msec100 = { 0, 100 * 1000 };
679 struct timeval msec50 = { 0, 50 * 1000 };
680 struct timeval msec300 = { 0, 300 * 1000 };
681
682 event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count);
683 event_add(&ev, &msec100);
684 /* Wait for a bit */
685 evutil_usleep_(&msec300);
686 event_base_loopexit(data->base, &msec50);
687 event_base_dispatch(data->base);
688 tt_int_op(count, ==, 1);
689
690 end:
691 event_del(&ev);
692 }
693
694 struct persist_active_timeout_called {
695 int n;
696 short events[16];
697 struct timeval tvs[16];
698 };
699
700 static void
701 activate_cb(evutil_socket_t fd, short event, void *arg)
702 {
703 struct event *ev = arg;
704 event_active(ev, EV_READ, 1);
705 }
706
707 static void
708 persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg)
709 {
710 struct persist_active_timeout_called *c = arg;
711 if (c->n < 15) {
712 c->events[c->n] = event;
713 evutil_gettimeofday(&c->tvs[c->n], NULL);
714 ++c->n;
715 }
716 }
717
718 static void
719 test_persistent_active_timeout(void *ptr)
720 {
721 struct timeval tv, tv2, tv_exit, start;
722 struct event ev;
723 struct persist_active_timeout_called res;
724
725 struct basic_test_data *data = ptr;
726 struct event_base *base = data->base;
727
728 memset(&res, 0, sizeof(res));
729
730 tv.tv_sec = 0;
731 tv.tv_usec = 200 * 1000;
732 event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST,
733 persist_active_timeout_cb, &res);
734 event_add(&ev, &tv);
735
736 tv2.tv_sec = 0;
737 tv2.tv_usec = 100 * 1000;
738 event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2);
739
740 tv_exit.tv_sec = 0;
741 tv_exit.tv_usec = 600 * 1000;
742 event_base_loopexit(base, &tv_exit);
743
744 event_base_assert_ok_(base);
745 evutil_gettimeofday(&start, NULL);
746
747 event_base_dispatch(base);
748 event_base_assert_ok_(base);
749
750 tt_int_op(res.n, ==, 3);
751 tt_int_op(res.events[0], ==, EV_READ);
752 tt_int_op(res.events[1], ==, EV_TIMEOUT);
753 tt_int_op(res.events[2], ==, EV_TIMEOUT);
754 test_timeval_diff_eq(&start, &res.tvs[0], 100);
755 test_timeval_diff_eq(&start, &res.tvs[1], 300);
756 test_timeval_diff_eq(&start, &res.tvs[2], 500);
757 end:
758 event_del(&ev);
759 }
760
761 struct common_timeout_info {
762 struct event ev;
763 struct timeval called_at;
764 int which;
765 int count;
766 };
767
768 static void
769 common_timeout_cb(evutil_socket_t fd, short event, void *arg)
770 {
771 struct common_timeout_info *ti = arg;
772 ++ti->count;
773 evutil_gettimeofday(&ti->called_at, NULL);
774 if (ti->count >= 4)
775 event_del(&ti->ev);
776 }
777
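/* Exercise event_base_init_common_timeout(): equal durations must map to the
 * same magic timeval (note the 0x5xxxxxxx tag in tv_usec), and events that
 * use them must still fire at the expected times. */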
778 static void
779 test_common_timeout(void *ptr)
780 {
781 struct basic_test_data *data = ptr;
782
783 struct event_base *base = data->base;
784 int i;
785 struct common_timeout_info info[100];
786
787 struct timeval start;
788 struct timeval tmp_100_ms = { 0, 100*1000 };
789 struct timeval tmp_200_ms = { 0, 200*1000 };
790 struct timeval tmp_5_sec = { 5, 0 };
791 struct timeval tmp_5M_usec = { 0, 5*1000*1000 };
792
793 const struct timeval *ms_100, *ms_200, *sec_5;
794
795 ms_100 = event_base_init_common_timeout(base, &tmp_100_ms);
796 ms_200 = event_base_init_common_timeout(base, &tmp_200_ms);
797 sec_5 = event_base_init_common_timeout(base, &tmp_5_sec);
798 tt_assert(ms_100);
799 tt_assert(ms_200);
800 tt_assert(sec_5);
801 tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms),
802 ==, ms_200);
803 tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200);
804 tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5);
805 tt_int_op(ms_100->tv_sec, ==, 0);
806 tt_int_op(ms_200->tv_sec, ==, 0);
807 tt_int_op(sec_5->tv_sec, ==, 5);
808 tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000);
809 tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000);
810 tt_int_op(sec_5->tv_usec, ==, 0|0x50200000);
811
812 memset(info, 0, sizeof(info));
813
814 for (i=0; i<100; ++i) {
815 info[i].which = i;
816 event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST,
817 common_timeout_cb, &info[i]);
818 if (i % 2) {
819 if ((i%20)==1) {
820 /* Glass-box test: Make sure we survive the
821 * transition to non-common timeouts. It's
822 * a little tricky. */
823 event_add(&info[i].ev, ms_200);
824 event_add(&info[i].ev, &tmp_100_ms);
825 } else if ((i%20)==3) {
826 /* Check heap-to-common too. */
827 event_add(&info[i].ev, &tmp_200_ms);
828 event_add(&info[i].ev, ms_100);
829 } else if ((i%20)==5) {
830 /* Also check common-to-common. */
831 event_add(&info[i].ev, ms_200);
832 event_add(&info[i].ev, ms_100);
833 } else {
834 event_add(&info[i].ev, ms_100);
835 }
836 } else {
837 event_add(&info[i].ev, ms_200);
838 }
839 }
840
841 event_base_assert_ok_(base);
842 evutil_gettimeofday(&start, NULL);
843 event_base_dispatch(base);
844
845 event_base_assert_ok_(base);
846
847 for (i=0; i<10; ++i) {
848 tt_int_op(info[i].count, ==, 4);
849 if (i % 2) {
850 test_timeval_diff_eq(&start, &info[i].called_at, 400);
851 } else {
852 test_timeval_diff_eq(&start, &info[i].called_at, 800);
853 }
854 }
855
856 /* Make sure we can free the base with some events in. */
857 for (i=0; i<100; ++i) {
858 if (i % 2) {
859 event_add(&info[i].ev, ms_100);
860 } else {
861 event_add(&info[i].ev, ms_200);
862 }
863 }
864
865 end:
866 event_base_free(data->base); /* need to do this here before info is
867 * out-of-scope */
868 data->base = NULL;
869 }
870
871 #ifndef _WIN32
872
873 #define current_base event_global_current_base_
874 extern struct event_base *current_base;
875
876 static void
877 fork_signal_cb(evutil_socket_t fd, short events, void *arg)
878 {
879 event_del(arg);
880 }
881
882 int child_pair[2] = { -1, -1 };
883 static void
884 simple_child_read_cb(evutil_socket_t fd, short event, void *arg)
885 {
886 char buf[256];
887 int len;
888
889 len = read(fd, buf, sizeof(buf));
890 if (write(child_pair[0], "", 1) < 0)
891 tt_fail_perror("write");
892
893 if (len) {
894 if (!called) {
895 if (event_add(arg, NULL) == -1)
896 exit(1);
897 }
898 } else if (called == 1)
899 test_ok = 1;
900
901 called++;
902 }
903
904 #define TEST_FORK_EXIT_SUCCESS 76
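/* Reap the forked child and abort the test unless it exited with
 * TEST_FORK_EXIT_SUCCESS. */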
905 static void fork_wait_check(int pid)
906 {
907 int status;
908
909 TT_BLATHER(("Before waitpid"));
910
911 #ifdef WNOWAIT
912 if ((waitpid(pid, &status, WNOWAIT) == -1 && errno == EINVAL) &&
913 #else
914 if (
915 #endif
916 waitpid(pid, &status, 0) == -1) {
917 perror("waitpid");
918 exit(1);
919 }
920 TT_BLATHER(("After waitpid"));
921
922 if (WEXITSTATUS(status) != TEST_FORK_EXIT_SUCCESS) {
923 fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status));
924 exit(1);
925 }
926 }
927 static void
928 test_fork(void)
929 {
930 char c;
931 struct event ev, sig_ev, usr_ev, existing_ev;
932 pid_t pid;
933
934 setup_test("After fork: ");
935
936 {
937 if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, child_pair) == -1) {
938 fprintf(stderr, "%s: socketpair\n", __func__);
939 exit(1);
940 }
941
942 if (evutil_make_socket_nonblocking(child_pair[0]) == -1) {
943 fprintf(stderr, "fcntl(O_NONBLOCK)");
944 exit(1);
945 }
946 }
947
948 tt_assert(current_base);
949 evthread_make_base_notifiable(current_base);
950
951 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
952 tt_fail_perror("write");
953 }
954
955 event_set(&ev, pair[1], EV_READ, simple_child_read_cb, &ev);
956 if (event_add(&ev, NULL) == -1)
957 exit(1);
958
959 evsignal_set(&sig_ev, SIGCHLD, fork_signal_cb, &sig_ev);
960 evsignal_add(&sig_ev, NULL);
961
962 evsignal_set(&existing_ev, SIGUSR2, fork_signal_cb, &existing_ev);
963 evsignal_add(&existing_ev, NULL);
964
965 event_base_assert_ok_(current_base);
966 TT_BLATHER(("Before fork"));
967 if ((pid = regress_fork()) == 0) {
968 /* in the child */
969 TT_BLATHER(("In child, before reinit"));
970 event_base_assert_ok_(current_base);
971 if (event_reinit(current_base) == -1) {
972 fprintf(stdout, "FAILED (reinit)\n");
973 exit(1);
974 }
975 TT_BLATHER(("After reinit"));
976 event_base_assert_ok_(current_base);
977 TT_BLATHER(("After assert-ok"));
978
979 evsignal_del(&sig_ev);
980
981 evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev);
982 evsignal_add(&usr_ev, NULL);
983 kill(getpid(), SIGUSR1);
984 kill(getpid(), SIGUSR2);
985
986 called = 0;
987
988 event_dispatch();
989
990 event_base_free(current_base);
991
992 /* we do not send an EOF; simple_read_cb requires an EOF
993 * to set test_ok. we just verify that the callback was
994 * called. */
995 exit(test_ok != 0 || called != 2 ? -2 : TEST_FORK_EXIT_SUCCESS);
996 }
997
998 /** wait until client read first message */
999 if (read(child_pair[1], &c, 1) < 0) {
1000 tt_fail_perror("read");
1001 }
1002 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
1003 tt_fail_perror("write");
1004 }
1005
1006 fork_wait_check(pid);
1007
1008 /* test that the current event loop still works */
1009 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
1010 fprintf(stderr, "%s: write\n", __func__);
1011 }
1012
1013 shutdown(pair[0], EVUTIL_SHUT_WR);
1014
1015 evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev);
1016 evsignal_add(&usr_ev, NULL);
1017 kill(getpid(), SIGUSR1);
1018 kill(getpid(), SIGUSR2);
1019
1020 event_dispatch();
1021
1022 evsignal_del(&sig_ev);
1023 tt_int_op(test_ok, ==, 1);
1024
1025 end:
1026 cleanup_test();
1027 if (child_pair[0] != -1)
1028 evutil_closesocket(child_pair[0]);
1029 if (child_pair[1] != -1)
1030 evutil_closesocket(child_pair[1]);
1031 }
1032
1033 #ifdef EVTHREAD_USE_PTHREADS_IMPLEMENTED
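/* Thread body: run the dispatch loop and check that it only returns once the
 * sleeping del_wait_cb has finished, roughly 300 msec after it started. */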
1034 static void* del_wait_thread(void *arg)
1035 {
1036 struct timeval tv_start, tv_end;
1037
1038 evutil_gettimeofday(&tv_start, NULL);
1039 event_dispatch();
1040 evutil_gettimeofday(&tv_end, NULL);
1041
1042 test_timeval_diff_eq(&tv_start, &tv_end, 300);
1043
1044 end:
1045 return NULL;
1046 }
1047
1048 static void
1049 del_wait_cb(evutil_socket_t fd, short event, void *arg)
1050 {
1051 struct timeval delay = { 0, 300*1000 };
1052 TT_BLATHER(("Sleeping: %i", test_ok));
1053 evutil_usleep_(&delay);
1054 ++test_ok;
1055 }
1056
1057 static void
1058 test_del_wait(void)
1059 {
1060 struct event ev;
1061 THREAD_T thread;
1062
1063 setup_test("event_del will wait: ");
1064
1065 event_set(&ev, pair[1], EV_READ|EV_PERSIST, del_wait_cb, &ev);
1066 event_add(&ev, NULL);
1067
1068 THREAD_START(thread, del_wait_thread, NULL);
1069
1070 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
1071 tt_fail_perror("write");
1072 }
1073
1074 {
1075 struct timeval delay = { 0, 30*1000 };
1076 evutil_usleep_(&delay);
1077 }
1078
1079 {
1080 struct timeval tv_start, tv_end;
1081 evutil_gettimeofday(&tv_start, NULL);
1082 event_del(&ev);
1083 evutil_gettimeofday(&tv_end, NULL);
1084 test_timeval_diff_eq(&tv_start, &tv_end, 270);
1085 }
1086
1087 THREAD_JOIN(thread);
1088
1089 tt_int_op(test_ok, ==, 1);
1090
1091 end:
1092 ;
1093 }
1094
1095 static void null_cb(evutil_socket_t fd, short what, void *arg) {}
1096 static void* test_del_notify_thread(void *arg)
1097 {
1098 event_dispatch();
1099 return NULL;
1100 }
1101 static void
1102 test_del_notify(void)
1103 {
1104 struct event ev;
1105 THREAD_T thread;
1106
1107 test_ok = 1;
1108
1109 event_set(&ev, -1, EV_READ, null_cb, &ev);
1110 event_add(&ev, NULL);
1111
1112 THREAD_START(thread, test_del_notify_thread, NULL);
1113
1114 {
1115 struct timeval delay = { 0, 1000 };
1116 evutil_usleep_(&delay);
1117 }
1118
1119 event_del(&ev);
1120 THREAD_JOIN(thread);
1121 }
1122 #endif
1123
1124 static void
1125 signal_cb_sa(int sig)
1126 {
1127 test_ok = 2;
1128 }
1129
1130 static void
1131 signal_cb(evutil_socket_t fd, short event, void *arg)
1132 {
1133 struct event *ev = arg;
1134
1135 evsignal_del(ev);
1136 test_ok = 1;
1137 }
1138
1139 static void
1140 test_simplesignal_impl(int find_reorder)
1141 {
1142 struct event ev;
1143 struct itimerval itv;
1144
1145 evsignal_set(&ev, SIGALRM, signal_cb, &ev);
1146 evsignal_add(&ev, NULL);
1147 /* find bugs in which operations are re-ordered */
1148 if (find_reorder) {
1149 evsignal_del(&ev);
1150 evsignal_add(&ev, NULL);
1151 }
1152
1153 memset(&itv, 0, sizeof(itv));
1154 itv.it_value.tv_sec = 0;
1155 itv.it_value.tv_usec = 100000;
1156 if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
1157 goto skip_simplesignal;
1158
1159 event_dispatch();
1160 skip_simplesignal:
1161 if (evsignal_del(&ev) == -1)
1162 test_ok = 0;
1163
1164 cleanup_test();
1165 }
1166
1167 static void
1168 test_simplestsignal(void)
1169 {
1170 setup_test("Simplest one signal: ");
1171 test_simplesignal_impl(0);
1172 }
1173
1174 static void
1175 test_simplesignal(void)
1176 {
1177 setup_test("Simple signal: ");
1178 test_simplesignal_impl(1);
1179 }
1180
1181 static void
1182 test_multiplesignal(void)
1183 {
1184 struct event ev_one, ev_two;
1185 struct itimerval itv;
1186
1187 setup_test("Multiple signal: ");
1188
1189 evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one);
1190 evsignal_add(&ev_one, NULL);
1191
1192 evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two);
1193 evsignal_add(&ev_two, NULL);
1194
1195 memset(&itv, 0, sizeof(itv));
1196 itv.it_value.tv_sec = 0;
1197 itv.it_value.tv_usec = 100000;
1198 if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
1199 goto skip_simplesignal;
1200
1201 event_dispatch();
1202
1203 skip_simplesignal:
1204 if (evsignal_del(&ev_one) == -1)
1205 test_ok = 0;
1206 if (evsignal_del(&ev_two) == -1)
1207 test_ok = 0;
1208
1209 cleanup_test();
1210 }
1211
1212 static void
1213 test_immediatesignal(void)
1214 {
1215 struct event ev;
1216
1217 test_ok = 0;
1218 evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1219 evsignal_add(&ev, NULL);
1220 kill(getpid(), SIGUSR1);
1221 event_loop(EVLOOP_NONBLOCK);
1222 evsignal_del(&ev);
1223 cleanup_test();
1224 }
1225
1226 static void
1227 test_signal_dealloc(void)
1228 {
1229 /* make sure that evsignal_event is event_del'ed and pipe closed */
1230 struct event ev;
1231 struct event_base *base = event_init();
1232 evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1233 evsignal_add(&ev, NULL);
1234 evsignal_del(&ev);
1235 event_base_free(base);
1236 /* If we got here without asserting, we're fine. */
1237 test_ok = 1;
1238 cleanup_test();
1239 }
1240
1241 static void
1242 test_signal_pipeloss(void)
1243 {
1244 /* make sure that the base1 pipe is closed correctly. */
1245 struct event_base *base1, *base2;
1246 int pipe1;
1247 test_ok = 0;
1248 base1 = event_init();
1249 pipe1 = base1->sig.ev_signal_pair[0];
1250 base2 = event_init();
1251 event_base_free(base2);
1252 event_base_free(base1);
1253 if (close(pipe1) != -1 || errno!=EBADF) {
1254 /* fd must be closed, so second close gives -1, EBADF */
1255 printf("signal pipe not closed. ");
1256 test_ok = 0;
1257 } else {
1258 test_ok = 1;
1259 }
1260 cleanup_test();
1261 }
1262
1263 /*
1264 * make two bases to catch signals, use both of them. this only works
1265 * for event mechanisms that use our signal pipe trick. kqueue handles
1266 * signals internally, and all interested kqueues get all the signals.
1267 */
1268 static void
1269 test_signal_switchbase(void)
1270 {
1271 struct event ev1, ev2;
1272 struct event_base *base1, *base2;
1273 int is_kqueue;
1274 test_ok = 0;
1275 base1 = event_init();
1276 base2 = event_init();
1277 is_kqueue = !strcmp(event_get_method(),"kqueue");
1278 evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1);
1279 evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2);
1280 if (event_base_set(base1, &ev1) ||
1281 event_base_set(base2, &ev2) ||
1282 event_add(&ev1, NULL) ||
1283 event_add(&ev2, NULL)) {
1284 fprintf(stderr, "%s: cannot set base, add\n", __func__);
1285 exit(1);
1286 }
1287
1288 tt_ptr_op(event_get_base(&ev1), ==, base1);
1289 tt_ptr_op(event_get_base(&ev2), ==, base2);
1290
1291 test_ok = 0;
1292 /* can handle signal before loop is called */
1293 kill(getpid(), SIGUSR1);
1294 event_base_loop(base2, EVLOOP_NONBLOCK);
1295 if (is_kqueue) {
1296 if (!test_ok)
1297 goto end;
1298 test_ok = 0;
1299 }
1300 event_base_loop(base1, EVLOOP_NONBLOCK);
1301 if (test_ok && !is_kqueue) {
1302 test_ok = 0;
1303
1304 /* set base1 to handle signals */
1305 event_base_loop(base1, EVLOOP_NONBLOCK);
1306 kill(getpid(), SIGUSR1);
1307 event_base_loop(base1, EVLOOP_NONBLOCK);
1308 event_base_loop(base2, EVLOOP_NONBLOCK);
1309 }
1310 end:
1311 event_base_free(base1);
1312 event_base_free(base2);
1313 cleanup_test();
1314 }
1315
1316 /*
1317 * assert that a signal event removed from the event queue really is
1318 * removed - with no possibility of its parent handler being fired.
1319 */
1320 static void
1321 test_signal_assert(void)
1322 {
1323 struct event ev;
1324 struct event_base *base = event_init();
1325 test_ok = 0;
1326 /* use SIGCONT so we don't kill ourselves when we signal to nowhere */
1327 evsignal_set(&ev, SIGCONT, signal_cb, &ev);
1328 evsignal_add(&ev, NULL);
1329 /*
1330 * if evsignal_del() fails to reset the handler, its current handler
1331 * will still point to evsig_handler().
1332 */
1333 evsignal_del(&ev);
1334
1335 kill(getpid(), SIGCONT);
1336 #if 0
1337 /* only way to verify we were in evsig_handler() */
1338 /* XXXX Now there's no longer a good way. */
1339 if (base->sig.evsig_caught)
1340 test_ok = 0;
1341 else
1342 test_ok = 1;
1343 #else
1344 test_ok = 1;
1345 #endif
1346
1347 event_base_free(base);
1348 cleanup_test();
1349 return;
1350 }
1351
1352 /*
1353 * assert that we restore our previous signal handler properly.
1354 */
1355 static void
1356 test_signal_restore(void)
1357 {
1358 struct event ev;
1359 struct event_base *base = event_init();
1360 #ifdef EVENT__HAVE_SIGACTION
1361 struct sigaction sa;
1362 #endif
1363
1364 test_ok = 0;
1365 #ifdef EVENT__HAVE_SIGACTION
1366 sa.sa_handler = signal_cb_sa;
1367 sa.sa_flags = 0x0;
1368 sigemptyset(&sa.sa_mask);
1369 if (sigaction(SIGUSR1, &sa, NULL) == -1)
1370 goto out;
1371 #else
1372 if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR)
1373 goto out;
1374 #endif
1375 evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
1376 evsignal_add(&ev, NULL);
1377 evsignal_del(&ev);
1378
1379 kill(getpid(), SIGUSR1);
1380 /* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */
1381 if (test_ok != 2)
1382 test_ok = 0;
1383 out:
1384 event_base_free(base);
1385 cleanup_test();
1386 return;
1387 }
1388
1389 static void
1390 signal_cb_swp(int sig, short event, void *arg)
1391 {
1392 called++;
1393 if (called < 5)
1394 kill(getpid(), sig);
1395 else
1396 event_loopexit(NULL);
1397 }
1398 static void
1399 timeout_cb_swp(evutil_socket_t fd, short event, void *arg)
1400 {
1401 if (called == -1) {
1402 struct timeval tv = {5, 0};
1403
1404 called = 0;
1405 evtimer_add((struct event *)arg, &tv);
1406 kill(getpid(), SIGUSR1);
1407 return;
1408 }
1409 test_ok = 0;
1410 event_loopexit(NULL);
1411 }
1412
1413 static void
1414 test_signal_while_processing(void)
1415 {
1416 struct event_base *base = event_init();
1417 struct event ev, ev_timer;
1418 struct timeval tv = {0, 0};
1419
1420 setup_test("Receiving a signal while processing other signal: ");
1421
1422 called = -1;
1423 test_ok = 1;
1424 signal_set(&ev, SIGUSR1, signal_cb_swp, NULL);
1425 signal_add(&ev, NULL);
1426 evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer);
1427 evtimer_add(&ev_timer, &tv);
1428 event_dispatch();
1429
1430 event_base_free(base);
1431 cleanup_test();
1432 return;
1433 }
1434 #endif
1435
1436 static void
1437 test_free_active_base(void *ptr)
1438 {
1439 struct basic_test_data *data = ptr;
1440 struct event_base *base1;
1441 struct event ev1;
1442
1443 base1 = event_init();
1444 tt_assert(base1);
1445 event_assign(&ev1, base1, data->pair[1], EV_READ, dummy_read_cb, NULL);
1446 event_add(&ev1, NULL);
1447 event_base_free(base1); /* should not crash */
1448
1449 base1 = event_init();
1450 tt_assert(base1);
1451 event_assign(&ev1, base1, data->pair[0], 0, dummy_read_cb, NULL);
1452 event_active(&ev1, EV_READ, 1);
1453 event_base_free(base1);
1454 end:
1455 ;
1456 }
1457
1458 static void
1459 test_manipulate_active_events(void *ptr)
1460 {
1461 struct basic_test_data *data = ptr;
1462 struct event_base *base = data->base;
1463 struct event ev1;
1464
1465 event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL);
1466
1467 /* Make sure an active event is pending. */
1468 event_active(&ev1, EV_READ, 1);
1469 tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
1470 ==, EV_READ);
1471
1472 /* Make sure that activating an event twice works. */
1473 event_active(&ev1, EV_WRITE, 1);
1474 tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
1475 ==, EV_READ|EV_WRITE);
1476
1477 end:
1478 event_del(&ev1);
1479 }
1480
1481 static void
1482 event_selfarg_cb(evutil_socket_t fd, short event, void *arg)
1483 {
1484 struct event *ev = arg;
1485 struct event_base *base = event_get_base(ev);
1486 event_base_assert_ok_(base);
1487 event_base_loopexit(base, NULL);
1488 tt_want(ev == event_base_get_running_event(base));
1489 }
1490
1491 static void
1492 test_event_new_selfarg(void *ptr)
1493 {
1494 struct basic_test_data *data = ptr;
1495 struct event_base *base = data->base;
1496 struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb,
1497 event_self_cbarg());
1498
1499 event_active(ev, EV_READ, 1);
1500 event_base_dispatch(base);
1501
1502 event_free(ev);
1503 }
1504
1505 static void
1506 test_event_assign_selfarg(void *ptr)
1507 {
1508 struct basic_test_data *data = ptr;
1509 struct event_base *base = data->base;
1510 struct event ev;
1511
1512 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1513 event_self_cbarg());
1514 event_active(&ev, EV_READ, 1);
1515 event_base_dispatch(base);
1516 }
1517
1518 static void
1519 test_event_base_get_num_events(void *ptr)
1520 {
1521 struct basic_test_data *data = ptr;
1522 struct event_base *base = data->base;
1523 struct event ev;
1524 int event_count_active;
1525 int event_count_virtual;
1526 int event_count_added;
1527 int event_count_active_virtual;
1528 int event_count_active_added;
1529 int event_count_virtual_added;
1530 int event_count_active_added_virtual;
1531
1532 struct timeval qsec = {0, 100000};
1533
1534 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1535 event_self_cbarg());
1536
1537 event_add(&ev, &qsec);
1538 event_count_active = event_base_get_num_events(base,
1539 EVENT_BASE_COUNT_ACTIVE);
1540 event_count_virtual = event_base_get_num_events(base,
1541 EVENT_BASE_COUNT_VIRTUAL);
1542 event_count_added = event_base_get_num_events(base,
1543 EVENT_BASE_COUNT_ADDED);
1544 event_count_active_virtual = event_base_get_num_events(base,
1545 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1546 event_count_active_added = event_base_get_num_events(base,
1547 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1548 event_count_virtual_added = event_base_get_num_events(base,
1549 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1550 event_count_active_added_virtual = event_base_get_num_events(base,
1551 EVENT_BASE_COUNT_ACTIVE|
1552 EVENT_BASE_COUNT_ADDED|
1553 EVENT_BASE_COUNT_VIRTUAL);
1554 tt_int_op(event_count_active, ==, 0);
1555 tt_int_op(event_count_virtual, ==, 0);
1556 /* libevent itself adds a timeout event, so the event_count is 2 here */
1557 tt_int_op(event_count_added, ==, 2);
1558 tt_int_op(event_count_active_virtual, ==, 0);
1559 tt_int_op(event_count_active_added, ==, 2);
1560 tt_int_op(event_count_virtual_added, ==, 2);
1561 tt_int_op(event_count_active_added_virtual, ==, 2);
1562
1563 event_active(&ev, EV_READ, 1);
1564 event_count_active = event_base_get_num_events(base,
1565 EVENT_BASE_COUNT_ACTIVE);
1566 event_count_virtual = event_base_get_num_events(base,
1567 EVENT_BASE_COUNT_VIRTUAL);
1568 event_count_added = event_base_get_num_events(base,
1569 EVENT_BASE_COUNT_ADDED);
1570 event_count_active_virtual = event_base_get_num_events(base,
1571 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1572 event_count_active_added = event_base_get_num_events(base,
1573 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1574 event_count_virtual_added = event_base_get_num_events(base,
1575 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1576 event_count_active_added_virtual = event_base_get_num_events(base,
1577 EVENT_BASE_COUNT_ACTIVE|
1578 EVENT_BASE_COUNT_ADDED|
1579 EVENT_BASE_COUNT_VIRTUAL);
1580 tt_int_op(event_count_active, ==, 1);
1581 tt_int_op(event_count_virtual, ==, 0);
1582 tt_int_op(event_count_added, ==, 3);
1583 tt_int_op(event_count_active_virtual, ==, 1);
1584 tt_int_op(event_count_active_added, ==, 4);
1585 tt_int_op(event_count_virtual_added, ==, 3);
1586 tt_int_op(event_count_active_added_virtual, ==, 4);
1587
1588 event_base_loop(base, 0);
1589 event_count_active = event_base_get_num_events(base,
1590 EVENT_BASE_COUNT_ACTIVE);
1591 event_count_virtual = event_base_get_num_events(base,
1592 EVENT_BASE_COUNT_VIRTUAL);
1593 event_count_added = event_base_get_num_events(base,
1594 EVENT_BASE_COUNT_ADDED);
1595 event_count_active_virtual = event_base_get_num_events(base,
1596 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1597 event_count_active_added = event_base_get_num_events(base,
1598 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1599 event_count_virtual_added = event_base_get_num_events(base,
1600 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1601 event_count_active_added_virtual = event_base_get_num_events(base,
1602 EVENT_BASE_COUNT_ACTIVE|
1603 EVENT_BASE_COUNT_ADDED|
1604 EVENT_BASE_COUNT_VIRTUAL);
1605 tt_int_op(event_count_active, ==, 0);
1606 tt_int_op(event_count_virtual, ==, 0);
1607 tt_int_op(event_count_added, ==, 0);
1608 tt_int_op(event_count_active_virtual, ==, 0);
1609 tt_int_op(event_count_active_added, ==, 0);
1610 tt_int_op(event_count_virtual_added, ==, 0);
1611 tt_int_op(event_count_active_added_virtual, ==, 0);
1612
1613 event_base_add_virtual_(base);
1614 event_count_active = event_base_get_num_events(base,
1615 EVENT_BASE_COUNT_ACTIVE);
1616 event_count_virtual = event_base_get_num_events(base,
1617 EVENT_BASE_COUNT_VIRTUAL);
1618 event_count_added = event_base_get_num_events(base,
1619 EVENT_BASE_COUNT_ADDED);
1620 event_count_active_virtual = event_base_get_num_events(base,
1621 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
1622 event_count_active_added = event_base_get_num_events(base,
1623 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
1624 event_count_virtual_added = event_base_get_num_events(base,
1625 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
1626 event_count_active_added_virtual = event_base_get_num_events(base,
1627 EVENT_BASE_COUNT_ACTIVE|
1628 EVENT_BASE_COUNT_ADDED|
1629 EVENT_BASE_COUNT_VIRTUAL);
1630 tt_int_op(event_count_active, ==, 0);
1631 tt_int_op(event_count_virtual, ==, 1);
1632 tt_int_op(event_count_added, ==, 0);
1633 tt_int_op(event_count_active_virtual, ==, 1);
1634 tt_int_op(event_count_active_added, ==, 0);
1635 tt_int_op(event_count_virtual_added, ==, 1);
1636 tt_int_op(event_count_active_added_virtual, ==, 1);
1637
1638 end:
1639 ;
1640 }
1641
1642 static void
1643 test_event_base_get_max_events(void *ptr)
1644 {
1645 struct basic_test_data *data = ptr;
1646 struct event_base *base = data->base;
1647 struct event ev;
1648 struct event ev2;
1649 int event_count_active;
1650 int event_count_virtual;
1651 int event_count_added;
1652 int event_count_active_virtual;
1653 int event_count_active_added;
1654 int event_count_virtual_added;
1655 int event_count_active_added_virtual;
1656
1657 struct timeval qsec = {0, 100000};
1658
1659 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
1660 event_self_cbarg());
1661 event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb,
1662 event_self_cbarg());
1663
1664 event_add(&ev, &qsec);
1665 event_add(&ev2, &qsec);
1666 event_del(&ev2);
1667
1668 event_count_active = event_base_get_max_events(base,
1669 EVENT_BASE_COUNT_ACTIVE, 0);
1670 event_count_virtual = event_base_get_max_events(base,
1671 EVENT_BASE_COUNT_VIRTUAL, 0);
1672 event_count_added = event_base_get_max_events(base,
1673 EVENT_BASE_COUNT_ADDED, 0);
1674 event_count_active_virtual = event_base_get_max_events(base,
1675 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1676 event_count_active_added = event_base_get_max_events(base,
1677 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1678 event_count_virtual_added = event_base_get_max_events(base,
1679 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1680 event_count_active_added_virtual = event_base_get_max_events(base,
1681 EVENT_BASE_COUNT_ACTIVE |
1682 EVENT_BASE_COUNT_ADDED |
1683 EVENT_BASE_COUNT_VIRTUAL, 0);
1684
1685 tt_int_op(event_count_active, ==, 0);
1686 tt_int_op(event_count_virtual, ==, 0);
1687 /* libevent itself adds a timeout event, so the event_count is 4 here */
1688 tt_int_op(event_count_added, ==, 4);
1689 tt_int_op(event_count_active_virtual, ==, 0);
1690 tt_int_op(event_count_active_added, ==, 4);
1691 tt_int_op(event_count_virtual_added, ==, 4);
1692 tt_int_op(event_count_active_added_virtual, ==, 4);
1693
1694 event_active(&ev, EV_READ, 1);
1695 event_count_active = event_base_get_max_events(base,
1696 EVENT_BASE_COUNT_ACTIVE, 0);
1697 event_count_virtual = event_base_get_max_events(base,
1698 EVENT_BASE_COUNT_VIRTUAL, 0);
1699 event_count_added = event_base_get_max_events(base,
1700 EVENT_BASE_COUNT_ADDED, 0);
1701 event_count_active_virtual = event_base_get_max_events(base,
1702 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1703 event_count_active_added = event_base_get_max_events(base,
1704 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1705 event_count_virtual_added = event_base_get_max_events(base,
1706 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1707 event_count_active_added_virtual = event_base_get_max_events(base,
1708 EVENT_BASE_COUNT_ACTIVE |
1709 EVENT_BASE_COUNT_ADDED |
1710 EVENT_BASE_COUNT_VIRTUAL, 0);
1711
1712 tt_int_op(event_count_active, ==, 1);
1713 tt_int_op(event_count_virtual, ==, 0);
1714 tt_int_op(event_count_added, ==, 4);
1715 tt_int_op(event_count_active_virtual, ==, 1);
1716 tt_int_op(event_count_active_added, ==, 5);
1717 tt_int_op(event_count_virtual_added, ==, 4);
1718 tt_int_op(event_count_active_added_virtual, ==, 5);
1719
1720 event_base_loop(base, 0);
1721 event_count_active = event_base_get_max_events(base,
1722 EVENT_BASE_COUNT_ACTIVE, 1);
1723 event_count_virtual = event_base_get_max_events(base,
1724 EVENT_BASE_COUNT_VIRTUAL, 1);
1725 event_count_added = event_base_get_max_events(base,
1726 EVENT_BASE_COUNT_ADDED, 1);
1727 event_count_active_virtual = event_base_get_max_events(base,
1728 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1729 event_count_active_added = event_base_get_max_events(base,
1730 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1731 event_count_virtual_added = event_base_get_max_events(base,
1732 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1733 event_count_active_added_virtual = event_base_get_max_events(base,
1734 EVENT_BASE_COUNT_ACTIVE |
1735 EVENT_BASE_COUNT_ADDED |
1736 EVENT_BASE_COUNT_VIRTUAL, 1);
1737
1738 tt_int_op(event_count_active, ==, 1);
1739 tt_int_op(event_count_virtual, ==, 0);
1740 tt_int_op(event_count_added, ==, 4);
1741 tt_int_op(event_count_active_virtual, ==, 0);
1742 tt_int_op(event_count_active_added, ==, 0);
1743 tt_int_op(event_count_virtual_added, ==, 0);
1744 tt_int_op(event_count_active_added_virtual, ==, 0);
1745
1746 event_count_active = event_base_get_max_events(base,
1747 EVENT_BASE_COUNT_ACTIVE, 0);
1748 event_count_virtual = event_base_get_max_events(base,
1749 EVENT_BASE_COUNT_VIRTUAL, 0);
1750 event_count_added = event_base_get_max_events(base,
1751 EVENT_BASE_COUNT_ADDED, 0);
1752 tt_int_op(event_count_active, ==, 0);
1753 tt_int_op(event_count_virtual, ==, 0);
1754 tt_int_op(event_count_added, ==, 0);
1755
1756 event_base_add_virtual_(base);
1757 event_count_active = event_base_get_max_events(base,
1758 EVENT_BASE_COUNT_ACTIVE, 0);
1759 event_count_virtual = event_base_get_max_events(base,
1760 EVENT_BASE_COUNT_VIRTUAL, 0);
1761 event_count_added = event_base_get_max_events(base,
1762 EVENT_BASE_COUNT_ADDED, 0);
1763 event_count_active_virtual = event_base_get_max_events(base,
1764 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
1765 event_count_active_added = event_base_get_max_events(base,
1766 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
1767 event_count_virtual_added = event_base_get_max_events(base,
1768 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
1769 event_count_active_added_virtual = event_base_get_max_events(base,
1770 EVENT_BASE_COUNT_ACTIVE |
1771 EVENT_BASE_COUNT_ADDED |
1772 EVENT_BASE_COUNT_VIRTUAL, 0);
1773
1774 tt_int_op(event_count_active, ==, 0);
1775 tt_int_op(event_count_virtual, ==, 1);
1776 tt_int_op(event_count_added, ==, 0);
1777 tt_int_op(event_count_active_virtual, ==, 1);
1778 tt_int_op(event_count_active_added, ==, 0);
1779 tt_int_op(event_count_virtual_added, ==, 1);
1780 tt_int_op(event_count_active_added_virtual, ==, 1);
1781
1782 end:
1783 ;
1784 }
1785
1786 static void
test_bad_assign(void * ptr)1787 test_bad_assign(void *ptr)
1788 {
1789 struct event ev;
1790 int r;
1791 /* READ|SIGNAL is not allowed */
1792 r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL);
1793 tt_int_op(r,==,-1);
1794
1795 end:
1796 ;
1797 }
1798
1799 static int reentrant_cb_run = 0;
1800
1801 static void
1802 bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr)
1803 {
1804 struct event_base *base = ptr;
1805 int r;
1806 reentrant_cb_run = 1;
1807 /* This reentrant call to event_base_loop should be detected and
1808 * should fail */
1809 r = event_base_loop(base, 0);
1810 tt_int_op(r, ==, -1);
1811 end:
1812 ;
1813 }
1814
1815 static void
1816 test_bad_reentrant(void *ptr)
1817 {
1818 struct basic_test_data *data = ptr;
1819 struct event_base *base = data->base;
1820 struct event ev;
1821 int r;
1822 event_assign(&ev, base, -1,
1823 0, bad_reentrant_run_loop_cb, base);
1824
1825 event_active(&ev, EV_WRITE, 1);
1826 r = event_base_loop(base, 0);
1827 tt_int_op(r, ==, 1);
1828 tt_int_op(reentrant_cb_run, ==, 1);
1829 end:
1830 ;
1831 }
1832
1833 static int n_write_a_byte_cb=0;
1834 static int n_read_and_drain_cb=0;
1835 static int n_activate_other_event_cb=0;
1836 static void
1837 write_a_byte_cb(evutil_socket_t fd, short what, void *arg)
1838 {
1839 char buf[] = "x";
1840 if (write(fd, buf, 1) == 1)
1841 ++n_write_a_byte_cb;
1842 }
1843 static void
1844 read_and_drain_cb(evutil_socket_t fd, short what, void *arg)
1845 {
1846 char buf[128];
1847 int n;
1848 ++n_read_and_drain_cb;
1849 while ((n = read(fd, buf, sizeof(buf))) > 0)
1850 ;
1851 }
1852
1853 static void
1854 activate_other_event_cb(evutil_socket_t fd, short what, void *other_)
1855 {
1856 struct event *ev_activate = other_;
1857 ++n_activate_other_event_cb;
1858 event_active_later_(ev_activate, EV_READ);
1859 }
1860
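/* ev3 and ev4 activate each other via event_active_later_() while ev1/ev2
 * pump single bytes across the socketpair; after about 100 msec the loop
 * exits and the callback counts are compared. */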
1861 static void
1862 test_active_later(void *ptr)
1863 {
1864 struct basic_test_data *data = ptr;
1865 struct event *ev1 = NULL, *ev2 = NULL;
1866 struct event ev3, ev4;
1867 struct timeval qsec = {0, 100000};
1868 ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL);
1869 ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL);
1870 event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4);
1871 event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3);
1872 event_add(ev1, NULL);
1873 event_add(ev2, NULL);
1874 event_active_later_(&ev3, EV_READ);
1875
1876 event_base_loopexit(data->base, &qsec);
1877
1878 event_base_loop(data->base, 0);
1879
1880 TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.",
1881 n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb));
1882 event_del(&ev3);
1883 event_del(&ev4);
1884
1885 tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb);
1886 tt_int_op(n_write_a_byte_cb, >, 100);
1887 tt_int_op(n_read_and_drain_cb, >, 100);
1888 tt_int_op(n_activate_other_event_cb, >, 100);
1889
1890 event_active_later_(&ev4, EV_READ);
1891 event_active(&ev4, EV_READ, 1); /* This should make the event
1892 active immediately. */
1893 tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0);
1894 tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0);
1895
1896 /* Now leave this one around, so that event_free sees it and removes
1897 * it. */
1898 event_active_later_(&ev3, EV_READ);
1899 event_base_assert_ok_(data->base);
1900
1901 end:
1902 if (ev1)
1903 event_free(ev1);
1904 if (ev2)
1905 event_free(ev2);
1906
1907 event_base_free(data->base);
1908 data->base = NULL;
1909 }
1910
1911
1912 static void incr_arg_cb(evutil_socket_t fd, short what, void *arg)
1913 {
1914 int *intptr = arg;
1915 (void) fd; (void) what;
1916 ++*intptr;
1917 }
1918 static void remove_timers_cb(evutil_socket_t fd, short what, void *arg)
1919 {
1920 struct event **ep = arg;
1921 (void) fd; (void) what;
1922 event_remove_timer(ep[0]);
1923 event_remove_timer(ep[1]);
1924 }
1925 static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg)
1926 {
1927 evutil_socket_t *sockp = arg;
1928 (void) fd; (void) what;
1929 if (write(*sockp, "A", 1) < 0)
1930 tt_fail_perror("write");
1931 }
1932 struct read_not_timeout_param
1933 {
1934 struct event **ev;
1935 int events;
1936 int count;
1937 };
1938 static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg)
1939 {
1940 struct read_not_timeout_param *rntp = arg;
1941 char c;
1942 ev_ssize_t n;
1943 (void) fd; (void) what;
1944 n = read(fd, &c, 1);
1945 tt_int_op(n, ==, 1);
1946 rntp->events |= what;
1947 ++rntp->count;
1948 if (2 == rntp->count) event_del(rntp->ev[0]);
1949 end:
1950 ;
1951 }
1952
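/* Exercise event_remove_timer(): a 25 ms timer strips the timeouts from
 * the pending read event ev[0] and from the 75 ms timer ev[1].  The later
 * writes must then make ev[0] fire with EV_READ only (never EV_TIMEOUT),
 * and ev[1] must not fire at all. */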
1953 static void
1954 test_event_remove_timeout(void *ptr)
1955 {
1956 struct basic_test_data *data = ptr;
1957 struct event_base *base = data->base;
1958 struct event *ev[5];
1959 int ev1_fired=0;
1960 struct timeval ms25 = { 0, 25*1000 },
1961 ms40 = { 0, 40*1000 },
1962 ms75 = { 0, 75*1000 },
1963 ms125 = { 0, 125*1000 };
1964 struct read_not_timeout_param rntp = { ev, 0, 0 };
1965
1966 event_base_assert_ok_(base);
1967
1968 ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST,
1969 read_not_timeout_cb, &rntp);
1970 ev[1] = evtimer_new(base, incr_arg_cb, &ev1_fired);
1971 ev[2] = evtimer_new(base, remove_timers_cb, ev);
1972 ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
1973 ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
1974 tt_assert(base);
1975 event_add(ev[2], &ms25); /* remove timers */
1976 event_add(ev[4], &ms40); /* write to test if timer re-activates */
1977 event_add(ev[0], &ms75); /* read */
1978 event_add(ev[1], &ms75); /* timer */
1979 event_add(ev[3], &ms125); /* timeout. */
1980 event_base_assert_ok_(base);
1981
1982 event_base_dispatch(base);
1983
1984 tt_int_op(ev1_fired, ==, 0);
1985 tt_int_op(rntp.events, ==, EV_READ);
1986
1987 event_base_assert_ok_(base);
1988 end:
1989 event_free(ev[0]);
1990 event_free(ev[1]);
1991 event_free(ev[2]);
1992 event_free(ev[3]);
1993 event_free(ev[4]);
1994 }
1995
1996 static void
1997 test_event_base_new(void *ptr)
1998 {
1999 struct basic_test_data *data = ptr;
2000 struct event_base *base = 0;
2001 struct event ev1;
2002 struct basic_cb_args args;
2003
2004 int towrite = (int)strlen(TEST1)+1;
2005 int len = write(data->pair[0], TEST1, towrite);
2006
2007 if (len < 0)
2008 tt_abort_perror("initial write");
2009 else if (len != towrite)
2010 tt_abort_printf(("initial write fell short (%d of %d bytes)",
2011 len, towrite));
2012
2013 if (shutdown(data->pair[0], EVUTIL_SHUT_WR))
2014 tt_abort_perror("initial write shutdown");
2015
2016 base = event_base_new();
2017 if (!base)
2018 tt_abort_msg("failed to create event base");
2019
2020 args.eb = base;
2021 args.ev = &ev1;
2022 args.callcount = 0;
2023 event_assign(&ev1, base, data->pair[1],
2024 EV_READ|EV_PERSIST, basic_read_cb, &args);
2025
2026 if (event_add(&ev1, NULL))
2027 tt_abort_perror("initial event_add");
2028
2029 if (event_base_loop(base, 0))
2030 tt_abort_msg("unsuccessful exit from event loop");
2031
2032 end:
2033 if (base)
2034 event_base_free(base);
2035 }
2036
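/* event_loopexit() with a 300 ms delay must end the dispatch on time even
 * though a 24-hour timer is still pending, and must set the "got exit"
 * flag without setting "got break". */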
2037 static void
2038 test_loopexit(void)
2039 {
2040 struct timeval tv, tv_start, tv_end;
2041 struct event ev;
2042
2043 setup_test("Loop exit: ");
2044
2045 tv.tv_usec = 0;
2046 tv.tv_sec = 60*60*24;
2047 evtimer_set(&ev, timeout_cb, NULL);
2048 evtimer_add(&ev, &tv);
2049
2050 tv.tv_usec = 300*1000;
2051 tv.tv_sec = 0;
2052 event_loopexit(&tv);
2053
2054 evutil_gettimeofday(&tv_start, NULL);
2055 event_dispatch();
2056 evutil_gettimeofday(&tv_end, NULL);
2057
2058 evtimer_del(&ev);
2059
2060 tt_assert(event_base_got_exit(global_base));
2061 tt_assert(!event_base_got_break(global_base));
2062
2063 test_timeval_diff_eq(&tv_start, &tv_end, 300);
2064
2065 test_ok = 1;
2066 end:
2067 cleanup_test();
2068 }
2069
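/* When two loopexit timeouts are scheduled on the same base (200 ms and
 * 3 s), the earlier one must end the loop. */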
2070 static void
2071 test_loopexit_multiple(void)
2072 {
2073 struct timeval tv, tv_start, tv_end;
2074 struct event_base *base;
2075
2076 setup_test("Loop Multiple exit: ");
2077
2078 base = event_base_new();
2079
2080 tv.tv_usec = 200*1000;
2081 tv.tv_sec = 0;
2082 event_base_loopexit(base, &tv);
2083
2084 tv.tv_usec = 0;
2085 tv.tv_sec = 3;
2086 event_base_loopexit(base, &tv);
2087
2088 evutil_gettimeofday(&tv_start, NULL);
2089 event_base_dispatch(base);
2090 evutil_gettimeofday(&tv_end, NULL);
2091
2092 tt_assert(event_base_got_exit(base));
2093 tt_assert(!event_base_got_break(base));
2094
2095 event_base_free(base);
2096
2097 test_timeval_diff_eq(&tv_start, &tv_end, 200);
2098
2099 test_ok = 1;
2100
2101 end:
2102 cleanup_test();
2103 }
2104
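/* break_cb() calls event_loopbreak() from inside a callback; fail_cb(),
 * scheduled for the same instant, must therefore never get to run. */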
2105 static void
2106 break_cb(evutil_socket_t fd, short events, void *arg)
2107 {
2108 test_ok = 1;
2109 event_loopbreak();
2110 }
2111
2112 static void
2113 fail_cb(evutil_socket_t fd, short events, void *arg)
2114 {
2115 test_ok = 0;
2116 }
2117
2118 static void
2119 test_loopbreak(void)
2120 {
2121 struct event ev1, ev2;
2122 struct timeval tv;
2123
2124 setup_test("Loop break: ");
2125
2126 tv.tv_sec = 0;
2127 tv.tv_usec = 0;
2128 evtimer_set(&ev1, break_cb, NULL);
2129 evtimer_add(&ev1, &tv);
2130 evtimer_set(&ev2, fail_cb, NULL);
2131 evtimer_add(&ev2, &tv);
2132
2133 event_dispatch();
2134
2135 tt_assert(!event_base_got_exit(global_base));
2136 tt_assert(event_base_got_break(global_base));
2137
2138 evtimer_del(&ev1);
2139 evtimer_del(&ev2);
2140
2141 end:
2142 cleanup_test();
2143 }
2144
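/* Helpers for the nonpersist_readd test: each non-persistent read event
 * re-adds its peer from the callback; readd_test_event_last_added records
 * which event was re-added last so the test can check which one is still
 * pending afterwards. */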
2145 static struct event *readd_test_event_last_added = NULL;
2146 static void
2147 re_add_read_cb(evutil_socket_t fd, short event, void *arg)
2148 {
2149 char buf[256];
2150 struct event *ev_other = arg;
2151 ev_ssize_t n_read;
2152
2153 readd_test_event_last_added = ev_other;
2154
2155 n_read = read(fd, buf, sizeof(buf));
2156
2157 if (n_read < 0) {
2158 tt_fail_perror("read");
2159 event_base_loopbreak(event_get_base(ev_other));
2160 } else {
2161 event_add(ev_other, NULL);
2162 ++test_ok;
2163 }
2164 }
2165 static void
2166 test_nonpersist_readd(void *_data)
2167 {
2168 struct event ev1, ev2;
2169 struct basic_test_data *data = _data;
2170
2171 memset(&ev1, 0, sizeof(ev1));
2172 memset(&ev2, 0, sizeof(ev2));
2173
2174 tt_assert(!event_assign(&ev1, data->base, data->pair[0], EV_READ, re_add_read_cb, &ev2));
2175 tt_assert(!event_assign(&ev2, data->base, data->pair[1], EV_READ, re_add_read_cb, &ev1));
2176
2177 tt_int_op(write(data->pair[0], "Hello", 5), ==, 5);
2178 tt_int_op(write(data->pair[1], "Hello", 5), ==, 5);
2179
2180 tt_int_op(event_add(&ev1, NULL), ==, 0);
2181 tt_int_op(event_add(&ev2, NULL), ==, 0);
2182 tt_int_op(event_base_loop(data->base, EVLOOP_ONCE), ==, 0);
2183 tt_int_op(test_ok, ==, 2);
2184
2185 /* At this point, we executed both callbacks. Whichever one got
2186 * called first added the second, but the second then immediately got
2187 * deleted before its callback was called. At this point, though, it
2188 * re-added the first.
2189 */
2190 tt_assert(readd_test_event_last_added);
2191 if (readd_test_event_last_added == &ev1) {
2192 tt_assert(event_pending(&ev1, EV_READ, NULL) && !event_pending(&ev2, EV_READ, NULL));
2193 } else {
2194 tt_assert(event_pending(&ev2, EV_READ, NULL) && !event_pending(&ev1, EV_READ, NULL));
2195 }
2196
2197 end:
2198 if (event_initialized(&ev1))
2199 event_del(&ev1);
2200 if (event_initialized(&ev2))
2201 event_del(&ev2);
2202 }
2203
2204 struct test_pri_event {
2205 struct event ev;
2206 int count;
2207 };
2208
2209 static void
2210 test_priorities_cb(evutil_socket_t fd, short what, void *arg)
2211 {
2212 struct test_pri_event *pri = arg;
2213 struct timeval tv;
2214
2215 if (pri->count == 3) {
2216 event_loopexit(NULL);
2217 return;
2218 }
2219
2220 pri->count++;
2221
2222 evutil_timerclear(&tv);
2223 event_add(&pri->ev, &tv);
2224 }
2225
2226 static void
2227 test_priorities_impl(int npriorities)
2228 {
2229 struct test_pri_event one, two;
2230 struct timeval tv;
2231
2232 TT_BLATHER(("Testing Priorities %d: ", npriorities));
2233
2234 event_base_priority_init(global_base, npriorities);
2235
2236 memset(&one, 0, sizeof(one));
2237 memset(&two, 0, sizeof(two));
2238
2239 timeout_set(&one.ev, test_priorities_cb, &one);
2240 if (event_priority_set(&one.ev, 0) == -1) {
2241 fprintf(stderr, "%s: failed to set priority", __func__);
2242 exit(1);
2243 }
2244
2245 timeout_set(&two.ev, test_priorities_cb, &two);
2246 if (event_priority_set(&two.ev, npriorities - 1) == -1) {
2247 fprintf(stderr, "%s: failed to set priority", __func__);
2248 exit(1);
2249 }
2250
2251 evutil_timerclear(&tv);
2252
2253 if (event_add(&one.ev, &tv) == -1)
2254 exit(1);
2255 if (event_add(&two.ev, &tv) == -1)
2256 exit(1);
2257
2258 event_dispatch();
2259
2260 event_del(&one.ev);
2261 event_del(&two.ev);
2262
2263 if (npriorities == 1) {
2264 if (one.count == 3 && two.count == 3)
2265 test_ok = 1;
2266 } else if (npriorities == 2) {
2267 /* Two is called once because event_loopexit is priority 1 */
2268 if (one.count == 3 && two.count == 1)
2269 test_ok = 1;
2270 } else {
2271 if (one.count == 3 && two.count == 0)
2272 test_ok = 1;
2273 }
2274 }
2275
2276 static void
2277 test_priorities(void)
2278 {
2279 test_priorities_impl(1);
2280 if (test_ok)
2281 test_priorities_impl(2);
2282 if (test_ok)
2283 test_priorities_impl(3);
2284 }
2285
2286 /* priority-active-inversion: activate a higher-priority event, and make sure
2287 * it keeps us from running a lower-priority event first. */
2288 static int n_pai_calls = 0;
2289 static struct event pai_events[3];
2290
2291 static void
2292 prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg)
2293 {
2294 int *call_order = arg;
2295 *call_order = n_pai_calls++;
2296 if (n_pai_calls == 1) {
2297 /* This should activate later, even though it shares a
2298 priority with us. */
2299 event_active(&pai_events[1], EV_READ, 1);
2300 /* This should activate next, since its priority is higher,
2301 even though we activated it second. */
2302 event_active(&pai_events[2], EV_TIMEOUT, 1);
2303 }
2304 }
2305
2306 static void
2307 test_priority_active_inversion(void *data_)
2308 {
2309 struct basic_test_data *data = data_;
2310 struct event_base *base = data->base;
2311 int call_order[3];
2312 int i;
2313 tt_int_op(event_base_priority_init(base, 8), ==, 0);
2314
2315 n_pai_calls = 0;
2316 memset(call_order, 0, sizeof(call_order));
2317
2318 for (i=0;i<3;++i) {
2319 event_assign(&pai_events[i], data->base, -1, 0,
2320 prio_active_inversion_cb, &call_order[i]);
2321 }
2322
2323 event_priority_set(&pai_events[0], 4);
2324 event_priority_set(&pai_events[1], 4);
2325 event_priority_set(&pai_events[2], 0);
2326
2327 event_active(&pai_events[0], EV_WRITE, 1);
2328
2329 event_base_dispatch(base);
2330 tt_int_op(n_pai_calls, ==, 3);
2331 tt_int_op(call_order[0], ==, 0);
2332 tt_int_op(call_order[1], ==, 2);
2333 tt_int_op(call_order[2], ==, 1);
2334 end:
2335 ;
2336 }
2337
2338
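/* Register separate EV_READ and EV_WRITE events on the same fd; each one
 * sets its own bit in test_ok, so both must fire for test_ok to reach 3. */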
2339 static void
2340 test_multiple_cb(evutil_socket_t fd, short event, void *arg)
2341 {
2342 if (event & EV_READ)
2343 test_ok |= 1;
2344 else if (event & EV_WRITE)
2345 test_ok |= 2;
2346 }
2347
2348 static void
2349 test_multiple_events_for_same_fd(void)
2350 {
2351 struct event e1, e2;
2352
2353 setup_test("Multiple events for same fd: ");
2354
2355 event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL);
2356 event_add(&e1, NULL);
2357 event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL);
2358 event_add(&e2, NULL);
2359 event_loop(EVLOOP_ONCE);
2360 event_del(&e2);
2361
2362 if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) {
2363 tt_fail_perror("write");
2364 }
2365
2366 event_loop(EVLOOP_ONCE);
2367 event_del(&e1);
2368
2369 if (test_ok != 3)
2370 test_ok = 0;
2371
2372 cleanup_test();
2373 }
2374
2375 int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
2376 int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
2377 int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number);
2378 int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf);
2379
2380 static void
2381 read_once_cb(evutil_socket_t fd, short event, void *arg)
2382 {
2383 char buf[256];
2384 int len;
2385
2386 len = read(fd, buf, sizeof(buf));
2387
2388 if (called) {
2389 test_ok = 0;
2390 } else if (len) {
2391 /* Assumes global pair[0] can be used for writing */
2392 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
2393 tt_fail_perror("write");
2394 test_ok = 0;
2395 } else {
2396 test_ok = 1;
2397 }
2398 }
2399
2400 called++;
2401 }
2402
2403 static void
2404 test_want_only_once(void)
2405 {
2406 struct event ev;
2407 struct timeval tv;
2408
2409 /* Very simple read test */
2410 setup_test("Want read only once: ");
2411
2412 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
2413 tt_fail_perror("write");
2414 }
2415
2416 /* Set up the loop termination */
2417 evutil_timerclear(&tv);
2418 tv.tv_usec = 300*1000;
2419 event_loopexit(&tv);
2420
2421 event_set(&ev, pair[1], EV_READ, read_once_cb, &ev);
2422 if (event_add(&ev, NULL) == -1)
2423 exit(1);
2424 event_dispatch();
2425
2426 cleanup_test();
2427 }
2428
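/* evtag round-trip tests: encode a fixed set of integer/tag values (plus
 * 64-bit variants in the int test), decode them back, and expect the
 * buffer to be empty once everything has been consumed. */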
2429 #define TEST_MAX_INT 6
2430
2431 static void
2432 evtag_int_test(void *ptr)
2433 {
2434 struct evbuffer *tmp = evbuffer_new();
2435 ev_uint32_t integers[TEST_MAX_INT] = {
2436 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
2437 };
2438 ev_uint32_t integer;
2439 ev_uint64_t big_int;
2440 int i;
2441
2442 evtag_init();
2443
2444 for (i = 0; i < TEST_MAX_INT; i++) {
2445 int oldlen, newlen;
2446 oldlen = (int)EVBUFFER_LENGTH(tmp);
2447 evtag_encode_int(tmp, integers[i]);
2448 newlen = (int)EVBUFFER_LENGTH(tmp);
2449 TT_BLATHER(("encoded 0x%08x with %d bytes",
2450 (unsigned)integers[i], newlen - oldlen));
2451 big_int = integers[i];
2452 big_int *= 1000000000; /* 1 billion */
2453 evtag_encode_int64(tmp, big_int);
2454 }
2455
2456 for (i = 0; i < TEST_MAX_INT; i++) {
2457 tt_int_op(evtag_decode_int(&integer, tmp), !=, -1);
2458 tt_uint_op(integer, ==, integers[i]);
2459 tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1);
2460 tt_assert((big_int / 1000000000) == integers[i]);
2461 }
2462
2463 tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
2464 end:
2465 evbuffer_free(tmp);
2466 }
2467
2468 static void
2469 evtag_fuzz(void *ptr)
2470 {
2471 unsigned char buffer[4096];
2472 struct evbuffer *tmp = evbuffer_new();
2473 struct timeval tv;
2474 int i, j;
2475
2476 int not_failed = 0;
2477
2478 evtag_init();
2479
2480 for (j = 0; j < 100; j++) {
2481 for (i = 0; i < (int)sizeof(buffer); i++)
2482 buffer[i] = test_weakrand();
2483 evbuffer_drain(tmp, -1);
2484 evbuffer_add(tmp, buffer, sizeof(buffer));
2485
2486 if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1)
2487 not_failed++;
2488 }
2489
2490 /* The majority of decodes should fail */
2491 tt_int_op(not_failed, <, 10);
2492
2493 /* Now insert some corruption into the tag length field */
2494 evbuffer_drain(tmp, -1);
2495 evutil_timerclear(&tv);
2496 tv.tv_sec = 1;
2497 evtag_marshal_timeval(tmp, 0, &tv);
2498 evbuffer_add(tmp, buffer, sizeof(buffer));
2499
2500 ((char *)EVBUFFER_DATA(tmp))[1] = '\xff';
2501 if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
2502 tt_abort_msg("evtag_unmarshal_timeval should have failed");
2503 }
2504
2505 end:
2506 evbuffer_free(tmp);
2507 }
2508
2509 static void
2510 evtag_tag_encoding(void *ptr)
2511 {
2512 struct evbuffer *tmp = evbuffer_new();
2513 ev_uint32_t integers[TEST_MAX_INT] = {
2514 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
2515 };
2516 ev_uint32_t integer;
2517 int i;
2518
2519 evtag_init();
2520
2521 for (i = 0; i < TEST_MAX_INT; i++) {
2522 int oldlen, newlen;
2523 oldlen = (int)EVBUFFER_LENGTH(tmp);
2524 evtag_encode_tag(tmp, integers[i]);
2525 newlen = (int)EVBUFFER_LENGTH(tmp);
2526 TT_BLATHER(("encoded 0x%08x with %d bytes",
2527 (unsigned)integers[i], newlen - oldlen));
2528 }
2529
2530 for (i = 0; i < TEST_MAX_INT; i++) {
2531 tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1);
2532 tt_uint_op(integer, ==, integers[i]);
2533 }
2534
2535 tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
2536
2537 end:
2538 evbuffer_free(tmp);
2539 }
2540
2541 static void
2542 evtag_test_peek(void *ptr)
2543 {
2544 struct evbuffer *tmp = evbuffer_new();
2545 ev_uint32_t u32;
2546
2547 evtag_marshal_int(tmp, 30, 0);
2548 evtag_marshal_string(tmp, 40, "Hello world");
2549
2550 tt_int_op(evtag_peek(tmp, &u32), ==, 1);
2551 tt_int_op(u32, ==, 30);
2552 tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
2553 tt_int_op(u32, ==, 1+1+1);
2554 tt_int_op(evtag_consume(tmp), ==, 0);
2555
2556 tt_int_op(evtag_peek(tmp, &u32), ==, 1);
2557 tt_int_op(u32, ==, 40);
2558 tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
2559 tt_int_op(u32, ==, 1+1+11);
2560 tt_int_op(evtag_payload_length(tmp, &u32), ==, 0);
2561 tt_int_op(u32, ==, 11);
2562
2563 end:
2564 evbuffer_free(tmp);
2565 }
2566
2567
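/* Ask the config to avoid the first supported backend: with more than one
 * backend available the base must come up on a different method; with only
 * one, event_base_new_with_config() must fail. */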
2568 static void
2569 test_methods(void *ptr)
2570 {
2571 const char **methods = event_get_supported_methods();
2572 struct event_config *cfg = NULL;
2573 struct event_base *base = NULL;
2574 const char *backend;
2575 int n_methods = 0;
2576
2577 tt_assert(methods);
2578
2579 backend = methods[0];
2580 while (*methods != NULL) {
2581 TT_BLATHER(("Support method: %s", *methods));
2582 ++methods;
2583 ++n_methods;
2584 }
2585
2586 cfg = event_config_new();
2587 assert(cfg != NULL);
2588
2589 tt_int_op(event_config_avoid_method(cfg, backend), ==, 0);
2590 event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
2591
2592 base = event_base_new_with_config(cfg);
2593 if (n_methods > 1) {
2594 tt_assert(base);
2595 tt_str_op(backend, !=, event_base_get_method(base));
2596 } else {
2597 tt_assert(base == NULL);
2598 }
2599
2600 end:
2601 if (base)
2602 event_base_free(base);
2603 if (cfg)
2604 event_config_free(cfg);
2605 }
2606
2607 static void
2608 test_version(void *arg)
2609 {
2610 const char *vstr;
2611 ev_uint32_t vint;
2612 int major, minor, patch, n;
2613
2614 vstr = event_get_version();
2615 vint = event_get_version_number();
2616
2617 tt_assert(vstr);
2618 tt_assert(vint);
2619
2620 tt_str_op(vstr, ==, LIBEVENT_VERSION);
2621 tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER);
2622
2623 n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch);
2624 tt_assert(3 == n);
2625 tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8)));
2626 end:
2627 ;
2628 }
2629
2630 static void
2631 test_base_features(void *arg)
2632 {
2633 struct event_base *base = NULL;
2634 struct event_config *cfg = NULL;
2635
2636 cfg = event_config_new();
2637
2638 tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET));
2639
2640 base = event_base_new_with_config(cfg);
2641 if (base) {
2642 tt_int_op(EV_FEATURE_ET, ==,
2643 event_base_get_features(base) & EV_FEATURE_ET);
2644 } else {
2645 base = event_base_new();
2646 tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET);
2647 }
2648
2649 end:
2650 if (base)
2651 event_base_free(base);
2652 if (cfg)
2653 event_config_free(cfg);
2654 }
2655
2656 #ifdef EVENT__HAVE_SETENV
2657 #define SETENV_OK
2658 #elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV)
2659 static void setenv(const char *k, const char *v, int o_)
2660 {
2661 char b[256];
2662 evutil_snprintf(b, sizeof(b), "%s=%s",k,v);
2663 putenv(b);
2664 }
2665 #define SETENV_OK
2666 #endif
2667
2668 #ifdef EVENT__HAVE_UNSETENV
2669 #define UNSETENV_OK
2670 #elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV)
2671 static void unsetenv(const char *k)
2672 {
2673 char b[256];
2674 evutil_snprintf(b, sizeof(b), "%s=",k);
2675 putenv(b);
2676 }
2677 #define UNSETENV_OK
2678 #endif
2679
2680 #if defined(SETENV_OK) && defined(UNSETENV_OK)
2681 static void
2682 methodname_to_envvar(const char *mname, char *buf, size_t buflen)
2683 {
2684 char *cp;
2685 evutil_snprintf(buf, buflen, "EVENT_NO%s", mname);
2686 for (cp = buf; *cp; ++cp) {
2687 *cp = EVUTIL_TOUPPER_(*cp);
2688 }
2689 }
2690 #endif
2691
2692 static void
2693 test_base_environ(void *arg)
2694 {
2695 struct event_base *base = NULL;
2696 struct event_config *cfg = NULL;
2697
2698 #if defined(SETENV_OK) && defined(UNSETENV_OK)
2699 const char **basenames;
2700 int i, n_methods=0;
2701 char varbuf[128];
2702 const char *defaultname, *ignoreenvname;
2703
2704 /* See if unsetenv works before we rely on it. */
2705 setenv("EVENT_NOWAFFLES", "1", 1);
2706 unsetenv("EVENT_NOWAFFLES");
2707 if (getenv("EVENT_NOWAFFLES") != NULL) {
2708 #ifndef EVENT__HAVE_UNSETENV
2709 TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test"));
2710 #else
2711 TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test"));
2712 #endif
2713 tt_skip();
2714 }
2715
2716 basenames = event_get_supported_methods();
2717 for (i = 0; basenames[i]; ++i) {
2718 methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf));
2719 unsetenv(varbuf);
2720 ++n_methods;
2721 }
2722
2723 base = event_base_new();
2724 tt_assert(base);
2725
2726 defaultname = event_base_get_method(base);
2727 TT_BLATHER(("default is <%s>", defaultname));
2728 event_base_free(base);
2729 base = NULL;
2730
2731 /* Can we disable the method with EVENT_NOfoo ? */
2732 if (!strcmp(defaultname, "epoll (with changelist)")) {
2733 setenv("EVENT_NOEPOLL", "1", 1);
2734 ignoreenvname = "epoll";
2735 } else {
2736 methodname_to_envvar(defaultname, varbuf, sizeof(varbuf));
2737 setenv(varbuf, "1", 1);
2738 ignoreenvname = defaultname;
2739 }
2740
2741 /* Use an empty cfg rather than NULL so a failure doesn't exit() */
2742 cfg = event_config_new();
2743 base = event_base_new_with_config(cfg);
2744 event_config_free(cfg);
2745 cfg = NULL;
2746 if (n_methods == 1) {
2747 tt_assert(!base);
2748 } else {
2749 tt_assert(base);
2750 tt_str_op(defaultname, !=, event_base_get_method(base));
2751 event_base_free(base);
2752 base = NULL;
2753 }
2754
2755 /* Can we disable looking at the environment with IGNORE_ENV ? */
2756 cfg = event_config_new();
2757 event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
2758 base = event_base_new_with_config(cfg);
2759 tt_assert(base);
2760 tt_str_op(ignoreenvname, ==, event_base_get_method(base));
2761 #else
2762 tt_skip();
2763 #endif
2764
2765 end:
2766 if (base)
2767 event_base_free(base);
2768 if (cfg)
2769 event_config_free(cfg);
2770 }
2771
2772 static void
2773 read_called_once_cb(evutil_socket_t fd, short event, void *arg)
2774 {
2775 tt_int_op(event, ==, EV_READ);
2776 called += 1;
2777 end:
2778 ;
2779 }
2780
2781 static void
2782 timeout_called_once_cb(evutil_socket_t fd, short event, void *arg)
2783 {
2784 tt_int_op(event, ==, EV_TIMEOUT);
2785 called += 100;
2786 end:
2787 ;
2788 }
2789
2790 static void
2791 immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg)
2792 {
2793 tt_int_op(event, ==, EV_TIMEOUT);
2794 called += 1000;
2795 end:
2796 ;
2797 }
2798
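/* The one-shot callbacks above add 1 (read), 100 (timeout) and 1000 (each
 * immediate timeout) to "called", so one read, one 50 ms timeout and two
 * immediate timeouts must total exactly 2101. */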
2799 static void
2800 test_event_once(void *ptr)
2801 {
2802 struct basic_test_data *data = ptr;
2803 struct timeval tv;
2804 int r;
2805
2806 tv.tv_sec = 0;
2807 tv.tv_usec = 50*1000;
2808 called = 0;
2809 r = event_base_once(data->base, data->pair[0], EV_READ,
2810 read_called_once_cb, NULL, NULL);
2811 tt_int_op(r, ==, 0);
2812 r = event_base_once(data->base, -1, EV_TIMEOUT,
2813 timeout_called_once_cb, NULL, &tv);
2814 tt_int_op(r, ==, 0);
2815 r = event_base_once(data->base, -1, 0, NULL, NULL, NULL);
2816 tt_int_op(r, <, 0);
2817 r = event_base_once(data->base, -1, EV_TIMEOUT,
2818 immediate_called_twice_cb, NULL, NULL);
2819 tt_int_op(r, ==, 0);
2820 tv.tv_sec = 0;
2821 tv.tv_usec = 0;
2822 r = event_base_once(data->base, -1, EV_TIMEOUT,
2823 immediate_called_twice_cb, NULL, &tv);
2824 tt_int_op(r, ==, 0);
2825
2826 if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) {
2827 tt_fail_perror("write");
2828 }
2829
2830 shutdown(data->pair[1], EVUTIL_SHUT_WR);
2831
2832 event_base_dispatch(data->base);
2833
2834 tt_int_op(called, ==, 2101);
2835 end:
2836 ;
2837 }
2838
2839 static void
2840 test_event_once_never(void *ptr)
2841 {
2842 struct basic_test_data *data = ptr;
2843 struct timeval tv;
2844
2845 /* Have one event trigger in 10 seconds (it never will, because we exit first) */
2846 tv.tv_sec = 10;
2847 tv.tv_usec = 0;
2848 called = 0;
2849 event_base_once(data->base, -1, EV_TIMEOUT,
2850 timeout_called_once_cb, NULL, &tv);
2851
2852 /* But shut down the base in 75 msec. */
2853 tv.tv_sec = 0;
2854 tv.tv_usec = 75*1000;
2855 event_base_loopexit(data->base, &tv);
2856
2857 event_base_dispatch(data->base);
2858
2859 tt_int_op(called, ==, 0);
2860 end:
2861 ;
2862 }
2863
2864 static void
2865 test_event_pending(void *ptr)
2866 {
2867 struct basic_test_data *data = ptr;
2868 struct event *r=NULL, *w=NULL, *t=NULL;
2869 struct timeval tv, now, tv2;
2870
2871 tv.tv_sec = 0;
2872 tv.tv_usec = 500 * 1000;
2873 r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb,
2874 NULL);
2875 w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb,
2876 NULL);
2877 t = evtimer_new(data->base, timeout_cb, NULL);
2878
2879 tt_assert(r);
2880 tt_assert(w);
2881 tt_assert(t);
2882
2883 evutil_gettimeofday(&now, NULL);
2884 event_add(r, NULL);
2885 event_add(t, &tv);
2886
2887 tt_assert( event_pending(r, EV_READ, NULL));
2888 tt_assert(!event_pending(w, EV_WRITE, NULL));
2889 tt_assert(!event_pending(r, EV_WRITE, NULL));
2890 tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL));
2891 tt_assert(!event_pending(r, EV_TIMEOUT, NULL));
2892 tt_assert( event_pending(t, EV_TIMEOUT, NULL));
2893 tt_assert( event_pending(t, EV_TIMEOUT, &tv2));
2894
2895 tt_assert(evutil_timercmp(&tv2, &now, >));
2896
2897 test_timeval_diff_eq(&now, &tv2, 500);
2898
2899 end:
2900 if (r) {
2901 event_del(r);
2902 event_free(r);
2903 }
2904 if (w) {
2905 event_del(w);
2906 event_free(w);
2907 }
2908 if (t) {
2909 event_del(t);
2910 event_free(t);
2911 }
2912 }
2913
2914 static void
2915 dfd_cb(evutil_socket_t fd, short e, void *data)
2916 {
2917 *(int*)data = (int)e;
2918 }
2919
2920 static void
2921 test_event_closed_fd_poll(void *arg)
2922 {
2923 struct timeval tv;
2924 struct event *e;
2925 struct basic_test_data *data = (struct basic_test_data *)arg;
2926 int i = 0;
2927
2928 if (strcmp(event_base_get_method(data->base), "poll")) {
2929 tinytest_set_test_skipped_();
2930 return;
2931 }
2932
2933 e = event_new(data->base, data->pair[0], EV_READ, dfd_cb, &i);
2934 tt_assert(e);
2935
2936 tv.tv_sec = 0;
2937 tv.tv_usec = 500 * 1000;
2938 event_add(e, &tv);
2939 tt_assert(event_pending(e, EV_READ, NULL));
2940 close(data->pair[0]);
2941 data->pair[0] = -1; /** avoids double-close */
2942 event_base_loop(data->base, EVLOOP_ONCE);
2943 tt_int_op(i, ==, EV_READ);
2944
2945 end:
2946 if (e) {
2947 event_del(e);
2948 event_free(e);
2949 }
2950 }
2951
2952 #ifndef _WIN32
2953 /* You can't do this test on windows, since dup2 doesn't work on sockets */
2954
2955 /* Regression test for our workaround for a fun epoll/linux related bug
2956 * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2)
2957 * will get you an EEXIST */
2958 static void
2959 test_dup_fd(void *arg)
2960 {
2961 struct basic_test_data *data = arg;
2962 struct event_base *base = data->base;
2963 struct event *ev1=NULL, *ev2=NULL;
2964 int fd, dfd=-1;
2965 int ev1_got, ev2_got;
2966
2967 tt_int_op(write(data->pair[0], "Hello world",
2968 strlen("Hello world")), >, 0);
2969 fd = data->pair[1];
2970
2971 dfd = dup(fd);
2972 tt_int_op(dfd, >=, 0);
2973
2974 ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb, &ev1_got);
2975 ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got);
2976 ev1_got = ev2_got = 0;
2977 event_add(ev1, NULL);
2978 event_add(ev2, NULL);
2979 event_base_loop(base, EVLOOP_ONCE);
2980 tt_int_op(ev1_got, ==, EV_READ);
2981 tt_int_op(ev2_got, ==, EV_READ);
2982
2983 /* Now close and delete dfd then dispatch. We need to do the
2984 * dispatch here so that when we add it later, we think there
2985 * was an intermediate delete. */
2986 close(dfd);
2987 event_del(ev2);
2988 ev1_got = ev2_got = 0;
2989 event_base_loop(base, EVLOOP_ONCE);
2990 tt_want_int_op(ev1_got, ==, EV_READ);
2991 tt_int_op(ev2_got, ==, 0);
2992
2993 /* Re-duplicate the fd. We need to get the same duplicated
2994 * value that we closed to provoke the epoll quirk. Also, we
2995 * need to change the events to write, or else the old lingering
2996 * read event will make the test pass whether the change was
2997 * successful or not. */
2998 tt_int_op(dup2(fd, dfd), ==, dfd);
2999 event_free(ev2);
3000 ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got);
3001 event_add(ev2, NULL);
3002 ev1_got = ev2_got = 0;
3003 event_base_loop(base, EVLOOP_ONCE);
3004 tt_want_int_op(ev1_got, ==, EV_READ);
3005 tt_int_op(ev2_got, ==, EV_WRITE);
3006
3007 end:
3008 if (ev1)
3009 event_free(ev1);
3010 if (ev2)
3011 event_free(ev2);
3012 if (dfd >= 0)
3013 close(dfd);
3014 }
3015 #endif
3016
3017 #ifdef EVENT__DISABLE_MM_REPLACEMENT
3018 static void
3019 test_mm_functions(void *arg)
3020 {
3021 tinytest_set_test_skipped_();
3022 }
3023 #else
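/* The dummy allocators below tag each allocation with a 16-byte guard
 * string; check_dummy_mem_ok() lets the test verify that the event base
 * really was allocated through the replaced memory functions. */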
3024 static int
3025 check_dummy_mem_ok(void *mem_)
3026 {
3027 char *mem = mem_;
3028 mem -= 16;
3029 return !memcmp(mem, "{[<guardedram>]}", 16);
3030 }
3031
3032 static void *
3033 dummy_malloc(size_t len)
3034 {
3035 char *mem = malloc(len+16);
3036 memcpy(mem, "{[<guardedram>]}", 16);
3037 return mem+16;
3038 }
3039
3040 static void *
3041 dummy_realloc(void *mem_, size_t len)
3042 {
3043 char *mem = mem_;
3044 if (!mem)
3045 return dummy_malloc(len);
3046 tt_want(check_dummy_mem_ok(mem_));
3047 mem -= 16;
3048 mem = realloc(mem, len+16);
3049 return mem+16;
3050 }
3051
3052 static void
3053 dummy_free(void *mem_)
3054 {
3055 char *mem = mem_;
3056 tt_want(check_dummy_mem_ok(mem_));
3057 mem -= 16;
3058 free(mem);
3059 }
3060
3061 static void
3062 test_mm_functions(void *arg)
3063 {
3064 struct event_base *b = NULL;
3065 struct event_config *cfg = NULL;
3066 event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free);
3067 cfg = event_config_new();
3068 event_config_avoid_method(cfg, "Nonesuch");
3069 b = event_base_new_with_config(cfg);
3070 tt_assert(b);
3071 tt_assert(check_dummy_mem_ok(b));
3072 end:
3073 if (cfg)
3074 event_config_free(cfg);
3075 if (b)
3076 event_base_free(b);
3077 }
3078 #endif
3079
3080 static void
3081 many_event_cb(evutil_socket_t fd, short event, void *arg)
3082 {
3083 int *calledp = arg;
3084 *calledp += 1;
3085 }
3086
3087 static void
3088 test_many_events(void *arg)
3089 {
3090 /* Try 70 events that should all be ready at once. This will
3091 * exercise the "resize" code on most of the backends, and will make
3092 * sure that we can get past the 64-handle limit of some Windows
3093 * functions. */
3094 #define MANY 70
3095
3096 struct basic_test_data *data = arg;
3097 struct event_base *base = data->base;
3098 int one_at_a_time = data->setup_data != NULL;
3099 evutil_socket_t sock[MANY];
3100 struct event *ev[MANY];
3101 int xcalled[MANY];
3102 int i;
3103 int loopflags = EVLOOP_NONBLOCK, evflags=0;
3104 if (one_at_a_time) {
3105 loopflags |= EVLOOP_ONCE;
3106 evflags = EV_PERSIST;
3107 }
3108
3109 memset(sock, 0xff, sizeof(sock));
3110 memset(ev, 0, sizeof(ev));
3111 memset(xcalled, 0, sizeof(xcalled));
3112
3113 for (i = 0; i < MANY; ++i) {
3114 /* We need an event that will hit the backend, and that will
3115 * be ready immediately. "Send a datagram" is an easy
3116 * instance of that. */
3117 sock[i] = socket(AF_INET, SOCK_DGRAM, 0);
3118 tt_assert(sock[i] >= 0);
3119 tt_assert(!evutil_make_socket_nonblocking(sock[i]));
3120 xcalled[i] = 0;
3121 ev[i] = event_new(base, sock[i], EV_WRITE|evflags,
3122 many_event_cb, &xcalled[i]);
3123 event_add(ev[i], NULL);
3124 if (one_at_a_time)
3125 event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE);
3126 }
3127
3128 event_base_loop(base, loopflags);
3129
3130 for (i = 0; i < MANY; ++i) {
3131 if (one_at_a_time)
3132 tt_int_op(xcalled[i], ==, MANY - i + 1);
3133 else
3134 tt_int_op(xcalled[i], ==, 1);
3135 }
3136
3137 end:
3138 for (i = 0; i < MANY; ++i) {
3139 if (ev[i])
3140 event_free(ev[i]);
3141 if (sock[i] >= 0)
3142 evutil_closesocket(sock[i]);
3143 }
3144 #undef MANY
3145 }
3146
3147 static void
3148 test_struct_event_size(void *arg)
3149 {
3150 tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event));
3151 end:
3152 ;
3153 }
3154
3155 static void
3156 test_get_assignment(void *arg)
3157 {
3158 struct basic_test_data *data = arg;
3159 struct event_base *base = data->base;
3160 struct event *ev1 = NULL;
3161 const char *str = "foo";
3162
3163 struct event_base *b;
3164 evutil_socket_t s;
3165 short what;
3166 event_callback_fn cb;
3167 void *cb_arg;
3168
3169 ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb,
3170 __UNCONST(str));
3171 event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg);
3172
3173 tt_ptr_op(b, ==, base);
3174 tt_fd_op(s, ==, data->pair[1]);
3175 tt_int_op(what, ==, EV_READ);
3176 tt_ptr_op(cb, ==, dummy_read_cb);
3177 tt_ptr_op(cb_arg, ==, str);
3178
3179 /* Now make sure this doesn't crash. */
3180 event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL);
3181
3182 end:
3183 if (ev1)
3184 event_free(ev1);
3185 }
3186
3187 struct foreach_helper {
3188 int count;
3189 const struct event *ev;
3190 };
3191
3192 static int
3193 foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg)
3194 {
3195 struct foreach_helper *h = event_get_callback_arg(ev);
3196 struct timeval *tv = arg;
3197 if (event_get_callback(ev) != timeout_cb)
3198 return 0;
3199 tt_ptr_op(event_get_base(ev), ==, base);
3200 tt_int_op(tv->tv_sec, ==, 10);
3201 h->ev = ev;
3202 h->count++;
3203 return 0;
3204 end:
3205 return -1;
3206 }
3207
3208 static int
3209 foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg)
3210 {
3211 const struct event **ev_out = arg;
3212 struct foreach_helper *h = event_get_callback_arg(ev);
3213 if (event_get_callback(ev) != timeout_cb)
3214 return 0;
3215 if (h->count == 99) {
3216 *ev_out = ev;
3217 return 101;
3218 }
3219 return 0;
3220 }
3221
3222 static void
3223 test_event_foreach(void *arg)
3224 {
3225 struct basic_test_data *data = arg;
3226 struct event_base *base = data->base;
3227 struct event *ev[5];
3228 struct foreach_helper visited[5];
3229 int i;
3230 struct timeval ten_sec = {10,0};
3231 const struct event *ev_found = NULL;
3232
3233 for (i = 0; i < 5; ++i) {
3234 visited[i].count = 0;
3235 visited[i].ev = NULL;
3236 ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]);
3237 }
3238
3239 tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL));
3240 tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL));
3241
3242 event_add(ev[0], &ten_sec);
3243 event_add(ev[1], &ten_sec);
3244 event_active(ev[1], EV_TIMEOUT, 1);
3245 event_active(ev[2], EV_TIMEOUT, 1);
3246 event_add(ev[3], &ten_sec);
3247 /* Don't touch ev[4]. */
3248
3249 tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb,
3250 &ten_sec));
3251 tt_int_op(1, ==, visited[0].count);
3252 tt_int_op(1, ==, visited[1].count);
3253 tt_int_op(1, ==, visited[2].count);
3254 tt_int_op(1, ==, visited[3].count);
3255 tt_ptr_op(ev[0], ==, visited[0].ev);
3256 tt_ptr_op(ev[1], ==, visited[1].ev);
3257 tt_ptr_op(ev[2], ==, visited[2].ev);
3258 tt_ptr_op(ev[3], ==, visited[3].ev);
3259
3260 visited[2].count = 99;
3261 tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb,
3262 &ev_found));
3263 tt_ptr_op(ev_found, ==, ev[2]);
3264
3265 end:
3266 for (i=0; i<5; ++i) {
3267 event_free(ev[i]);
3268 }
3269 }
3270
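/* cache_time_cb() reads the cached time and, depending on the setup
 * string, sleeps 30 ms and/or refreshes the cache.  The test below runs
 * three such callbacks back to back and checks whether their timestamps
 * match or diverge, depending on EVENT_BASE_FLAG_NO_CACHE_TIME and the
 * "sleep"/"reset" options. */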
3271 static struct event_base *cached_time_base = NULL;
3272 static int cached_time_reset = 0;
3273 static int cached_time_sleep = 0;
3274 static void
3275 cache_time_cb(evutil_socket_t fd, short what, void *arg)
3276 {
3277 struct timeval *tv = arg;
3278 tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv));
3279 if (cached_time_sleep) {
3280 struct timeval delay = { 0, 30*1000 };
3281 evutil_usleep_(&delay);
3282 }
3283 if (cached_time_reset) {
3284 event_base_update_cache_time(cached_time_base);
3285 }
3286 end:
3287 ;
3288 }
3289
3290 static void
3291 test_gettimeofday_cached(void *arg)
3292 {
3293 struct basic_test_data *data = arg;
3294 struct event_config *cfg = NULL;
3295 struct event_base *base = NULL;
3296 struct timeval tv1, tv2, tv3, now;
3297 struct event *ev1=NULL, *ev2=NULL, *ev3=NULL;
3298 int cached_time_disable = strstr(data->setup_data, "disable") != NULL;
3299
3300 cfg = event_config_new();
3301 if (cached_time_disable) {
3302 event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);
3303 }
3304 cached_time_base = base = event_base_new_with_config(cfg);
3305 tt_assert(base);
3306
3307 /* Try gettimeofday_cached outside of an event loop. */
3308 evutil_gettimeofday(&now, NULL);
3309 tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1));
3310 tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2));
3311 tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10);
3312 tt_int_op(timeval_msec_diff(&tv1, &now), <, 10);
3313
3314 cached_time_reset = strstr(data->setup_data, "reset") != NULL;
3315 cached_time_sleep = strstr(data->setup_data, "sleep") != NULL;
3316
3317 ev1 = event_new(base, -1, 0, cache_time_cb, &tv1);
3318 ev2 = event_new(base, -1, 0, cache_time_cb, &tv2);
3319 ev3 = event_new(base, -1, 0, cache_time_cb, &tv3);
3320
3321 event_active(ev1, EV_TIMEOUT, 1);
3322 event_active(ev2, EV_TIMEOUT, 1);
3323 event_active(ev3, EV_TIMEOUT, 1);
3324
3325 event_base_dispatch(base);
3326
3327 if (cached_time_reset && cached_time_sleep) {
3328 tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
3329 tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
3330 } else if (cached_time_disable && cached_time_sleep) {
3331 tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
3332 tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
3333 } else if (! cached_time_disable) {
3334 tt_assert(evutil_timercmp(&tv1, &tv2, ==));
3335 tt_assert(evutil_timercmp(&tv2, &tv3, ==));
3336 }
3337
3338 end:
3339 if (ev1)
3340 event_free(ev1);
3341 if (ev2)
3342 event_free(ev2);
3343 if (ev3)
3344 event_free(ev3);
3345 if (base)
3346 event_base_free(base);
3347 if (cfg)
3348 event_config_free(cfg);
3349 }
3350
3351 static void
3352 tabf_cb(evutil_socket_t fd, short what, void *arg)
3353 {
3354 int *ptr = arg;
3355 *ptr = what;
3356 *ptr += 0x10000;
3357 }
3358
3359 static void
3360 test_evmap_invalid_slots(void *arg)
3361 {
3362 struct basic_test_data *data = arg;
3363 struct event_base *base = data->base;
3364 struct event *ev1 = NULL, *ev2 = NULL;
3365 int e1, e2;
3366 #ifndef _WIN32
3367 struct event *ev3 = NULL, *ev4 = NULL;
3368 int e3, e4;
3369 #endif
3370
3371 ev1 = evsignal_new(base, -1, dummy_read_cb, (void *)base);
3372 ev2 = evsignal_new(base, NSIG, dummy_read_cb, (void *)base);
3373 tt_assert(ev1);
3374 tt_assert(ev2);
3375 e1 = event_add(ev1, NULL);
3376 e2 = event_add(ev2, NULL);
3377 tt_int_op(e1, !=, 0);
3378 tt_int_op(e2, !=, 0);
3379 #ifndef _WIN32
3380 ev3 = event_new(base, INT_MAX, EV_READ, dummy_read_cb, (void *)base);
3381 ev4 = event_new(base, INT_MAX / 2, EV_READ, dummy_read_cb, (void *)base);
3382 tt_assert(ev3);
3383 tt_assert(ev4);
3384 e3 = event_add(ev3, NULL);
3385 e4 = event_add(ev4, NULL);
3386 tt_int_op(e3, !=, 0);
3387 tt_int_op(e4, !=, 0);
3388 #endif
3389
3390 end:
3391 event_free(ev1);
3392 event_free(ev2);
3393 #ifndef _WIN32
3394 event_free(ev3);
3395 event_free(ev4);
3396 #endif
3397 }
3398
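/* Activate events by fd or signal number without any actual I/O:
 * event_base_active_by_fd()/_by_signal() must trigger only the matching
 * events.  tabf_cb() records the flags plus 0x10000 so "ran with zero
 * flags" can be told apart from "never ran". */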
3399 static void
3400 test_active_by_fd(void *arg)
3401 {
3402 struct basic_test_data *data = arg;
3403 struct event_base *base = data->base;
3404 struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL;
3405 int e1,e2,e3,e4;
3406 #ifndef _WIN32
3407 struct event *evsig = NULL;
3408 int es;
3409 #endif
3410 struct timeval tenmin = { 600, 0 };
3411
3412 /* Ensure no crash on nonexistent FD. */
3413 event_base_active_by_fd(base, 1000, EV_READ);
3414
3415 /* Ensure no crash on bogus FD. */
3416 event_base_active_by_fd(base, -1, EV_READ);
3417
3418 /* Ensure no crash on nonexistent/bogus signal. */
3419 event_base_active_by_signal(base, 1000);
3420 event_base_active_by_signal(base, -1);
3421
3422 event_base_assert_ok_(base);
3423
3424 e1 = e2 = e3 = e4 = 0;
3425 ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1);
3426 ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2);
3427 ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3);
3428 ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4);
3429 tt_assert(ev1);
3430 tt_assert(ev2);
3431 tt_assert(ev3);
3432 tt_assert(ev4);
3433 #ifndef _WIN32
3434 evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es);
3435 tt_assert(evsig);
3436 event_add(evsig, &tenmin);
3437 #endif
3438
3439 event_add(ev1, &tenmin);
3440 event_add(ev2, NULL);
3441 event_add(ev3, NULL);
3442 event_add(ev4, &tenmin);
3443
3444
3445 event_base_assert_ok_(base);
3446
3447 /* Trigger 2, 3, 4 */
3448 event_base_active_by_fd(base, data->pair[0], EV_WRITE);
3449 event_base_active_by_fd(base, data->pair[1], EV_READ);
3450 event_base_active_by_fd(base, data->pair[1], EV_TIMEOUT);
3451 #ifndef _WIN32
3452 event_base_active_by_signal(base, SIGHUP);
3453 #endif
3454
3455 event_base_assert_ok_(base);
3456
3457 event_base_loop(base, EVLOOP_ONCE);
3458
3459 tt_int_op(e1, ==, 0);
3460 tt_int_op(e2, ==, EV_WRITE | 0x10000);
3461 tt_int_op(e3, ==, EV_READ | 0x10000);
3462 /* Mask out EV_WRITE here, since it could be genuinely writeable. */
3463 tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | EV_TIMEOUT | 0x10000);
3464 #ifndef _WIN32
3465 tt_int_op(es, ==, EV_SIGNAL | 0x10000);
3466 #endif
3467
3468 end:
3469 if (ev1)
3470 event_free(ev1);
3471 if (ev2)
3472 event_free(ev2);
3473 if (ev3)
3474 event_free(ev3);
3475 if (ev4)
3476 event_free(ev4);
3477 #ifndef _WIN32
3478 if (evsig)
3479 event_free(evsig);
3480 #endif
3481 }
3482
3483 struct testcase_t main_testcases[] = {
3484 /* Some converted-over tests */
3485 { "methods", test_methods, TT_FORK, NULL, NULL },
3486 { "version", test_version, 0, NULL, NULL },
3487 BASIC(base_features, TT_FORK|TT_NO_LOGS),
3488 { "base_environ", test_base_environ, TT_FORK, NULL, NULL },
3489
3490 BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR),
3491 BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR),
3492
3493 BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE),
3494 BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE),
3495 BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE),
3496 BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE),
3497 BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE),
3498 BASIC(evmap_invalid_slots, TT_FORK|TT_NEED_BASE),
3499
3500 BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
3501 BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
3502 BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR|TT_RETRIABLE),
3503 BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3504
3505 /* These are still using the old API */
3506 LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE),
3507 { "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
3508 { "persistent_active_timeout", test_persistent_active_timeout,
3509 TT_FORK|TT_NEED_BASE|TT_RETRIABLE, &basic_setup, NULL },
3510 LEGACY(priorities, TT_FORK|TT_NEED_BASE),
3511 BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE),
3512 { "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE,
3513 &basic_setup, NULL },
3514
3515 /* These legacy tests may not all need all of these flags. */
3516 LEGACY(simpleread, TT_ISOLATED),
3517 LEGACY(simpleread_multiple, TT_ISOLATED),
3518 LEGACY(simplewrite, TT_ISOLATED),
3519 { "simpleclose_rw", test_simpleclose_rw, TT_FORK, &basic_setup, NULL },
3520 /* simpleclose */
3521 { "simpleclose_close", test_simpleclose,
3522 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3523 &basic_setup, __UNCONST("close") },
3524 { "simpleclose_shutdown", test_simpleclose,
3525 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3526 &basic_setup, __UNCONST("shutdown") },
3527 /* simpleclose_*_persist */
3528 { "simpleclose_close_persist", test_simpleclose,
3529 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3530 &basic_setup, __UNCONST("close_persist") },
3531 { "simpleclose_shutdown_persist", test_simpleclose,
3532 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3533 &basic_setup, __UNCONST("shutdown_persist") },
3534 /* simpleclose_*_et */
3535 { "simpleclose_close_et", test_simpleclose,
3536 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3537 &basic_setup, __UNCONST("close_ET") },
3538 { "simpleclose_shutdown_et", test_simpleclose,
3539 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3540 &basic_setup, __UNCONST("shutdown_ET") },
3541 /* simpleclose_*_persist_et */
3542 { "simpleclose_close_persist_et", test_simpleclose,
3543 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3544 &basic_setup, __UNCONST("close_persist_ET") },
3545 { "simpleclose_shutdown_persist_et", test_simpleclose,
3546 TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE,
3547 &basic_setup, __UNCONST("shutdown_persist_ET") },
3548 LEGACY(multiple, TT_ISOLATED),
3549 LEGACY(persistent, TT_ISOLATED),
3550 LEGACY(combined, TT_ISOLATED),
3551 LEGACY(simpletimeout, TT_ISOLATED),
3552 LEGACY(loopbreak, TT_ISOLATED),
3553 LEGACY(loopexit, TT_ISOLATED),
3554 LEGACY(loopexit_multiple, TT_ISOLATED),
3555 { "nonpersist_readd", test_nonpersist_readd, TT_FORK|TT_NEED_SOCKETPAIR|TT_NEED_BASE, &basic_setup, NULL },
3556 LEGACY(multiple_events_for_same_fd, TT_ISOLATED),
3557 LEGACY(want_only_once, TT_ISOLATED),
3558 { "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL },
3559 { "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL },
3560 { "event_pending", test_event_pending, TT_ISOLATED, &basic_setup,
3561 NULL },
3562 { "event_closed_fd_poll", test_event_closed_fd_poll, TT_ISOLATED, &basic_setup,
3563 NULL },
3564
3565 #ifndef _WIN32
3566 { "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL },
3567 #endif
3568 { "mm_functions", test_mm_functions, TT_FORK, NULL, NULL },
3569 { "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL },
3570 { "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 },
3571
3572 { "struct_event_size", test_struct_event_size, 0, NULL, NULL },
3573 BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3574
3575 BASIC(event_foreach, TT_FORK|TT_NEED_BASE),
3576 { "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("") },
3577 { "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("sleep") },
3578 { "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("sleep reset") },
3579 { "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("sleep disable") },
3580 { "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("disable") },
3581
3582 BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3583
3584 #ifndef _WIN32
3585 LEGACY(fork, TT_ISOLATED),
3586 #endif
3587
3588 #ifdef EVTHREAD_USE_PTHREADS_IMPLEMENTED
3589 LEGACY(del_wait, TT_ISOLATED|TT_NEED_THREADS|TT_RETRIABLE),
3590 LEGACY(del_notify, TT_ISOLATED|TT_NEED_THREADS),
3591 #endif
3592
3593 END_OF_TESTCASES
3594 };
3595
3596 struct testcase_t evtag_testcases[] = {
3597 { "int", evtag_int_test, TT_FORK, NULL, NULL },
3598 { "fuzz", evtag_fuzz, TT_FORK, NULL, NULL },
3599 { "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL },
3600 { "peek", evtag_test_peek, 0, NULL, NULL },
3601
3602 END_OF_TESTCASES
3603 };
3604
3605 struct testcase_t signal_testcases[] = {
3606 #ifndef _WIN32
3607 LEGACY(simplestsignal, TT_ISOLATED),
3608 LEGACY(simplesignal, TT_ISOLATED),
3609 LEGACY(multiplesignal, TT_ISOLATED),
3610 LEGACY(immediatesignal, TT_ISOLATED),
3611 LEGACY(signal_dealloc, TT_ISOLATED),
3612 LEGACY(signal_pipeloss, TT_ISOLATED),
3613 LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS),
3614 LEGACY(signal_restore, TT_ISOLATED),
3615 LEGACY(signal_assert, TT_ISOLATED),
3616 LEGACY(signal_while_processing, TT_ISOLATED),
3617 #endif
3618 END_OF_TESTCASES
3619 };
3620
3621