1 /* $NetBSD: regress.c,v 1.9 2017/01/31 23:17:40 christos Exp $ */ 2 /* 3 * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu> 4 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 #include "util-internal.h" 29 30 #ifdef _WIN32 31 #include <winsock2.h> 32 #include <windows.h> 33 #endif 34 35 #ifdef EVENT__HAVE_PTHREADS 36 #include <pthread.h> 37 #endif 38 39 #include "event2/event-config.h" 40 #include <sys/cdefs.h> 41 __RCSID("$NetBSD: regress.c,v 1.9 2017/01/31 23:17:40 christos Exp $"); 42 43 #include <sys/types.h> 44 #include <sys/stat.h> 45 #ifdef EVENT__HAVE_SYS_TIME_H 46 #include <sys/time.h> 47 #endif 48 #include <sys/queue.h> 49 #ifndef _WIN32 50 #include <sys/socket.h> 51 #include <sys/wait.h> 52 #include <signal.h> 53 #include <unistd.h> 54 #include <netdb.h> 55 #endif 56 #include <fcntl.h> 57 #include <signal.h> 58 #include <stdlib.h> 59 #include <stdio.h> 60 #include <string.h> 61 #include <errno.h> 62 #include <assert.h> 63 #include <ctype.h> 64 65 #include "event2/event.h" 66 #include "event2/event_struct.h" 67 #include "event2/event_compat.h" 68 #include "event2/tag.h" 69 #include "event2/buffer.h" 70 #include "event2/buffer_compat.h" 71 #include "event2/util.h" 72 #include "event-internal.h" 73 #include "evthread-internal.h" 74 #include "log-internal.h" 75 #include "time-internal.h" 76 77 #include "regress.h" 78 79 #ifndef _WIN32 80 #include "regress.gen.h" 81 #endif 82 83 evutil_socket_t pair[2]; 84 int test_ok; 85 int called; 86 struct event_base *global_base; 87 88 static char wbuf[4096]; 89 static char rbuf[4096]; 90 static int woff; 91 static int roff; 92 static int usepersist; 93 static struct timeval tset; 94 static struct timeval tcalled; 95 96 97 #define TEST1 "this is a test" 98 99 #ifdef _WIN32 100 #define write(fd,buf,len) send((fd),(buf),(int)(len),0) 101 #define read(fd,buf,len) recv((fd),(buf),(int)(len),0) 102 #endif 103 104 struct basic_cb_args 105 { 106 struct event_base *eb; 107 struct event *ev; 108 unsigned int callcount; 109 }; 110 111 static void 112 simple_read_cb(evutil_socket_t fd, short event, void *arg) 113 { 114 char 
buf[256]; 115 int len; 116 117 len = read(fd, buf, sizeof(buf)); 118 119 if (len) { 120 if (!called) { 121 if (event_add(arg, NULL) == -1) 122 exit(1); 123 } 124 } else if (called == 1) 125 test_ok = 1; 126 127 called++; 128 } 129 130 static void 131 basic_read_cb(evutil_socket_t fd, short event, void *data) 132 { 133 char buf[256]; 134 int len; 135 struct basic_cb_args *arg = data; 136 137 len = read(fd, buf, sizeof(buf)); 138 139 if (len < 0) { 140 tt_fail_perror("read (callback)"); 141 } else { 142 switch (arg->callcount++) { 143 case 0: /* first call: expect to read data; cycle */ 144 if (len > 0) 145 return; 146 147 tt_fail_msg("EOF before data read"); 148 break; 149 150 case 1: /* second call: expect EOF; stop */ 151 if (len > 0) 152 tt_fail_msg("not all data read on first cycle"); 153 break; 154 155 default: /* third call: should not happen */ 156 tt_fail_msg("too many cycles"); 157 } 158 } 159 160 event_del(arg->ev); 161 event_base_loopexit(arg->eb, NULL); 162 } 163 164 static void 165 dummy_read_cb(evutil_socket_t fd, short event, void *arg) 166 { 167 } 168 169 static void 170 simple_write_cb(evutil_socket_t fd, short event, void *arg) 171 { 172 int len; 173 174 len = write(fd, TEST1, strlen(TEST1) + 1); 175 if (len == -1) 176 test_ok = 0; 177 else 178 test_ok = 1; 179 } 180 181 static void 182 multiple_write_cb(evutil_socket_t fd, short event, void *arg) 183 { 184 struct event *ev = arg; 185 int len; 186 187 len = 128; 188 if (woff + len >= (int)sizeof(wbuf)) 189 len = sizeof(wbuf) - woff; 190 191 len = write(fd, wbuf + woff, len); 192 if (len == -1) { 193 fprintf(stderr, "%s: write\n", __func__); 194 if (usepersist) 195 event_del(ev); 196 return; 197 } 198 199 woff += len; 200 201 if (woff >= (int)sizeof(wbuf)) { 202 shutdown(fd, EVUTIL_SHUT_WR); 203 if (usepersist) 204 event_del(ev); 205 return; 206 } 207 208 if (!usepersist) { 209 if (event_add(ev, NULL) == -1) 210 exit(1); 211 } 212 } 213 214 static void 215 multiple_read_cb(evutil_socket_t fd, short event, void *arg) 216 { 217 struct event *ev = arg; 218 int len; 219 220 len = read(fd, rbuf + roff, sizeof(rbuf) - roff); 221 if (len == -1) 222 fprintf(stderr, "%s: read\n", __func__); 223 if (len <= 0) { 224 if (usepersist) 225 event_del(ev); 226 return; 227 } 228 229 roff += len; 230 if (!usepersist) { 231 if (event_add(ev, NULL) == -1) 232 exit(1); 233 } 234 } 235 236 static void 237 timeout_cb(evutil_socket_t fd, short event, void *arg) 238 { 239 evutil_gettimeofday(&tcalled, NULL); 240 } 241 242 struct both { 243 struct event ev; 244 int nread; 245 }; 246 247 static void 248 combined_read_cb(evutil_socket_t fd, short event, void *arg) 249 { 250 struct both *both = arg; 251 char buf[128]; 252 int len; 253 254 len = read(fd, buf, sizeof(buf)); 255 if (len == -1) 256 fprintf(stderr, "%s: read\n", __func__); 257 if (len <= 0) 258 return; 259 260 both->nread += len; 261 if (event_add(&both->ev, NULL) == -1) 262 exit(1); 263 } 264 265 static void 266 combined_write_cb(evutil_socket_t fd, short event, void *arg) 267 { 268 struct both *both = arg; 269 char buf[128]; 270 int len; 271 272 len = sizeof(buf); 273 if (len > both->nread) 274 len = both->nread; 275 276 memset(buf, 'q', len); 277 278 len = write(fd, buf, len); 279 if (len == -1) 280 fprintf(stderr, "%s: write\n", __func__); 281 if (len <= 0) { 282 shutdown(fd, EVUTIL_SHUT_WR); 283 return; 284 } 285 286 both->nread -= len; 287 if (event_add(&both->ev, NULL) == -1) 288 exit(1); 289 } 290 291 /* These macros used to replicate the work of the legacy test wrapper code */ 292 
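/*
 * Background note (added commentary, not part of the original file): the
 * legacy tests below do not create their own socket pair or event base.
 * The tinytest legacy wrapper (run_legacy_test_fn() in regress_main.c) is
 * expected to set up the global "pair" sockets and "global_base" before
 * each legacy test and tear them down afterwards, so setup_test() and
 * cleanup_test() now only assert that the test really was invoked through
 * that wrapper.
 */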
#define setup_test(x) do { \ 293 if (!in_legacy_test_wrapper) { \ 294 TT_FAIL(("Legacy test %s not wrapped properly", x)); \ 295 return; \ 296 } \ 297 } while (/*CONSTCOND*/0) 298 #define cleanup_test() setup_test("cleanup") 299 300 static void 301 test_simpleread(void) 302 { 303 struct event ev; 304 305 /* Very simple read test */ 306 setup_test("Simple read: "); 307 308 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 309 tt_fail_perror("write"); 310 } 311 312 shutdown(pair[0], EVUTIL_SHUT_WR); 313 314 event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev); 315 if (event_add(&ev, NULL) == -1) 316 exit(1); 317 event_dispatch(); 318 319 cleanup_test(); 320 } 321 322 static void 323 test_simplewrite(void) 324 { 325 struct event ev; 326 327 /* Very simple write test */ 328 setup_test("Simple write: "); 329 330 event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev); 331 if (event_add(&ev, NULL) == -1) 332 exit(1); 333 event_dispatch(); 334 335 cleanup_test(); 336 } 337 338 static void 339 simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg) 340 { 341 if (++called == 2) 342 test_ok = 1; 343 } 344 345 static void 346 test_simpleread_multiple(void) 347 { 348 struct event one, two; 349 350 /* Very simple read test */ 351 setup_test("Simple read to multiple evens: "); 352 353 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 354 tt_fail_perror("write"); 355 } 356 357 shutdown(pair[0], EVUTIL_SHUT_WR); 358 359 event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL); 360 if (event_add(&one, NULL) == -1) 361 exit(1); 362 event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL); 363 if (event_add(&two, NULL) == -1) 364 exit(1); 365 event_dispatch(); 366 367 cleanup_test(); 368 } 369 370 static int have_closed = 0; 371 static int premature_event = 0; 372 static void 373 simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr) 374 { 375 evutil_socket_t **fds = ptr; 376 TT_BLATHER(("Closing")); 377 evutil_closesocket(*fds[0]); 378 evutil_closesocket(*fds[1]); 379 *fds[0] = -1; 380 *fds[1] = -1; 381 have_closed = 1; 382 } 383 384 static void 385 record_event_cb(evutil_socket_t s, short what, void *ptr) 386 { 387 short *whatp = ptr; 388 if (!have_closed) 389 premature_event = 1; 390 *whatp = what; 391 TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s)); 392 } 393 394 static void 395 test_simpleclose(void *ptr) 396 { 397 /* Test that a close of FD is detected as a read and as a write. */ 398 struct event_base *base = event_base_new(); 399 evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1}; 400 evutil_socket_t *to_close[2]; 401 struct event *rev=NULL, *wev=NULL, *closeev=NULL; 402 struct timeval tv; 403 short got_read_on_close = 0, got_write_on_close = 0; 404 char buf[1024]; 405 memset(buf, 99, sizeof(buf)); 406 #ifdef _WIN32 407 #define LOCAL_SOCKETPAIR_AF AF_INET 408 #else 409 #define LOCAL_SOCKETPAIR_AF AF_UNIX 410 #endif 411 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0) 412 TT_DIE(("socketpair: %s", strerror(errno))); 413 if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0) 414 TT_DIE(("socketpair: %s", strerror(errno))); 415 if (evutil_make_socket_nonblocking(pair1[1]) < 0) 416 TT_DIE(("make_socket_nonblocking")); 417 if (evutil_make_socket_nonblocking(pair2[1]) < 0) 418 TT_DIE(("make_socket_nonblocking")); 419 420 /** Stuff pair2[1] full of data, until write fails */ 421 while (1) { 422 int r = write(pair2[1], buf, sizeof(buf)); 423 if (r<0) { 424 int err = evutil_socket_geterror(pair2[1]); 425 if (! 
EVUTIL_ERR_RW_RETRIABLE(err)) 426 TT_DIE(("write failed strangely: %s", 427 evutil_socket_error_to_string(err))); 428 break; 429 } 430 } 431 to_close[0] = &pair1[0]; 432 to_close[1] = &pair2[0]; 433 434 closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb, 435 to_close); 436 rev = event_new(base, pair1[1], EV_READ, record_event_cb, 437 &got_read_on_close); 438 TT_BLATHER(("Waiting for read on %d", (int)pair1[1])); 439 wev = event_new(base, pair2[1], EV_WRITE, record_event_cb, 440 &got_write_on_close); 441 TT_BLATHER(("Waiting for write on %d", (int)pair2[1])); 442 tv.tv_sec = 0; 443 tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make 444 * sure we get a read event. */ 445 event_add(closeev, &tv); 446 event_add(rev, NULL); 447 event_add(wev, NULL); 448 /* Don't let the test go on too long. */ 449 tv.tv_sec = 0; 450 tv.tv_usec = 200*1000; 451 event_base_loopexit(base, &tv); 452 event_base_loop(base, 0); 453 454 tt_int_op(got_read_on_close, ==, EV_READ); 455 tt_int_op(got_write_on_close, ==, EV_WRITE); 456 tt_int_op(premature_event, ==, 0); 457 458 end: 459 if (pair1[0] >= 0) 460 evutil_closesocket(pair1[0]); 461 if (pair1[1] >= 0) 462 evutil_closesocket(pair1[1]); 463 if (pair2[0] >= 0) 464 evutil_closesocket(pair2[0]); 465 if (pair2[1] >= 0) 466 evutil_closesocket(pair2[1]); 467 if (rev) 468 event_free(rev); 469 if (wev) 470 event_free(wev); 471 if (closeev) 472 event_free(closeev); 473 if (base) 474 event_base_free(base); 475 } 476 477 478 static void 479 test_multiple(void) 480 { 481 struct event ev, ev2; 482 int i; 483 484 /* Multiple read and write test */ 485 setup_test("Multiple read/write: "); 486 memset(rbuf, 0, sizeof(rbuf)); 487 for (i = 0; i < (int)sizeof(wbuf); i++) 488 wbuf[i] = i; 489 490 roff = woff = 0; 491 usepersist = 0; 492 493 event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev); 494 if (event_add(&ev, NULL) == -1) 495 exit(1); 496 event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2); 497 if (event_add(&ev2, NULL) == -1) 498 exit(1); 499 event_dispatch(); 500 501 if (roff == woff) 502 test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0; 503 504 cleanup_test(); 505 } 506 507 static void 508 test_persistent(void) 509 { 510 struct event ev, ev2; 511 int i; 512 513 /* Multiple read and write test with persist */ 514 setup_test("Persist read/write: "); 515 memset(rbuf, 0, sizeof(rbuf)); 516 for (i = 0; i < (int)sizeof(wbuf); i++) 517 wbuf[i] = i; 518 519 roff = woff = 0; 520 usepersist = 1; 521 522 event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev); 523 if (event_add(&ev, NULL) == -1) 524 exit(1); 525 event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2); 526 if (event_add(&ev2, NULL) == -1) 527 exit(1); 528 event_dispatch(); 529 530 if (roff == woff) 531 test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0; 532 533 cleanup_test(); 534 } 535 536 static void 537 test_combined(void) 538 { 539 struct both r1, r2, w1, w2; 540 541 setup_test("Combined read/write: "); 542 memset(&r1, 0, sizeof(r1)); 543 memset(&r2, 0, sizeof(r2)); 544 memset(&w1, 0, sizeof(w1)); 545 memset(&w2, 0, sizeof(w2)); 546 547 w1.nread = 4096; 548 w2.nread = 8192; 549 550 event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1); 551 event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1); 552 event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2); 553 event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2); 554 tt_assert(event_add(&r1.ev, NULL) != -1); 555 tt_assert(!event_add(&w1.ev, NULL)); 556 tt_assert(!event_add(&r2.ev, 
NULL)); 557 tt_assert(!event_add(&w2.ev, NULL)); 558 event_dispatch(); 559 560 if (r1.nread == 8192 && r2.nread == 4096) 561 test_ok = 1; 562 563 end: 564 cleanup_test(); 565 } 566 567 static void 568 test_simpletimeout(void) 569 { 570 struct timeval tv; 571 struct event ev; 572 573 setup_test("Simple timeout: "); 574 575 tv.tv_usec = 200*1000; 576 tv.tv_sec = 0; 577 evutil_timerclear(&tcalled); 578 evtimer_set(&ev, timeout_cb, NULL); 579 evtimer_add(&ev, &tv); 580 581 evutil_gettimeofday(&tset, NULL); 582 event_dispatch(); 583 test_timeval_diff_eq(&tset, &tcalled, 200); 584 585 test_ok = 1; 586 end: 587 cleanup_test(); 588 } 589 590 static void 591 periodic_timeout_cb(evutil_socket_t fd, short event, void *arg) 592 { 593 int *count = arg; 594 595 (*count)++; 596 if (*count == 6) { 597 /* call loopexit only once - on slow machines(?), it is 598 * apparently possible for this to get called twice. */ 599 test_ok = 1; 600 event_base_loopexit(global_base, NULL); 601 } 602 } 603 604 static void 605 test_persistent_timeout(void) 606 { 607 struct timeval tv; 608 struct event ev; 609 int count = 0; 610 611 evutil_timerclear(&tv); 612 tv.tv_usec = 10000; 613 614 event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST, 615 periodic_timeout_cb, &count); 616 event_add(&ev, &tv); 617 618 event_dispatch(); 619 620 event_del(&ev); 621 } 622 623 static void 624 test_persistent_timeout_jump(void *ptr) 625 { 626 struct basic_test_data *data = ptr; 627 struct event ev; 628 int count = 0; 629 struct timeval msec100 = { 0, 100 * 1000 }; 630 struct timeval msec50 = { 0, 50 * 1000 }; 631 struct timeval msec300 = { 0, 300 * 1000 }; 632 633 event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count); 634 event_add(&ev, &msec100); 635 /* Wait for a bit */ 636 evutil_usleep_(&msec300); 637 event_base_loopexit(data->base, &msec50); 638 event_base_dispatch(data->base); 639 tt_int_op(count, ==, 1); 640 641 end: 642 event_del(&ev); 643 } 644 645 struct persist_active_timeout_called { 646 int n; 647 short events[16]; 648 struct timeval tvs[16]; 649 }; 650 651 static void 652 activate_cb(evutil_socket_t fd, short event, void *arg) 653 { 654 struct event *ev = arg; 655 event_active(ev, EV_READ, 1); 656 } 657 658 static void 659 persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg) 660 { 661 struct persist_active_timeout_called *c = arg; 662 if (c->n < 15) { 663 c->events[c->n] = event; 664 evutil_gettimeofday(&c->tvs[c->n], NULL); 665 ++c->n; 666 } 667 } 668 669 static void 670 test_persistent_active_timeout(void *ptr) 671 { 672 struct timeval tv, tv2, tv_exit, start; 673 struct event ev; 674 struct persist_active_timeout_called res; 675 676 struct basic_test_data *data = ptr; 677 struct event_base *base = data->base; 678 679 memset(&res, 0, sizeof(res)); 680 681 tv.tv_sec = 0; 682 tv.tv_usec = 200 * 1000; 683 event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST, 684 persist_active_timeout_cb, &res); 685 event_add(&ev, &tv); 686 687 tv2.tv_sec = 0; 688 tv2.tv_usec = 100 * 1000; 689 event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2); 690 691 tv_exit.tv_sec = 0; 692 tv_exit.tv_usec = 600 * 1000; 693 event_base_loopexit(base, &tv_exit); 694 695 event_base_assert_ok_(base); 696 evutil_gettimeofday(&start, NULL); 697 698 event_base_dispatch(base); 699 event_base_assert_ok_(base); 700 701 tt_int_op(res.n, ==, 3); 702 tt_int_op(res.events[0], ==, EV_READ); 703 tt_int_op(res.events[1], ==, EV_TIMEOUT); 704 tt_int_op(res.events[2], ==, EV_TIMEOUT); 705 test_timeval_diff_eq(&start, 
&res.tvs[0], 100); 706 test_timeval_diff_eq(&start, &res.tvs[1], 300); 707 test_timeval_diff_eq(&start, &res.tvs[2], 500); 708 end: 709 event_del(&ev); 710 } 711 712 struct common_timeout_info { 713 struct event ev; 714 struct timeval called_at; 715 int which; 716 int count; 717 }; 718 719 static void 720 common_timeout_cb(evutil_socket_t fd, short event, void *arg) 721 { 722 struct common_timeout_info *ti = arg; 723 ++ti->count; 724 evutil_gettimeofday(&ti->called_at, NULL); 725 if (ti->count >= 4) 726 event_del(&ti->ev); 727 } 728 729 static void 730 test_common_timeout(void *ptr) 731 { 732 struct basic_test_data *data = ptr; 733 734 struct event_base *base = data->base; 735 int i; 736 struct common_timeout_info info[100]; 737 738 struct timeval start; 739 struct timeval tmp_100_ms = { 0, 100*1000 }; 740 struct timeval tmp_200_ms = { 0, 200*1000 }; 741 struct timeval tmp_5_sec = { 5, 0 }; 742 struct timeval tmp_5M_usec = { 0, 5*1000*1000 }; 743 744 const struct timeval *ms_100, *ms_200, *sec_5; 745 746 ms_100 = event_base_init_common_timeout(base, &tmp_100_ms); 747 ms_200 = event_base_init_common_timeout(base, &tmp_200_ms); 748 sec_5 = event_base_init_common_timeout(base, &tmp_5_sec); 749 tt_assert(ms_100); 750 tt_assert(ms_200); 751 tt_assert(sec_5); 752 tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms), 753 ==, ms_200); 754 tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200); 755 tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5); 756 tt_int_op(ms_100->tv_sec, ==, 0); 757 tt_int_op(ms_200->tv_sec, ==, 0); 758 tt_int_op(sec_5->tv_sec, ==, 5); 759 tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000); 760 tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000); 761 tt_int_op(sec_5->tv_usec, ==, 0|0x50200000); 762 763 memset(info, 0, sizeof(info)); 764 765 for (i=0; i<100; ++i) { 766 info[i].which = i; 767 event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST, 768 common_timeout_cb, &info[i]); 769 if (i % 2) { 770 if ((i%20)==1) { 771 /* Glass-box test: Make sure we survive the 772 * transition to non-common timeouts. It's 773 * a little tricky. */ 774 event_add(&info[i].ev, ms_200); 775 event_add(&info[i].ev, &tmp_100_ms); 776 } else if ((i%20)==3) { 777 /* Check heap-to-common too. */ 778 event_add(&info[i].ev, &tmp_200_ms); 779 event_add(&info[i].ev, ms_100); 780 } else if ((i%20)==5) { 781 /* Also check common-to-common. */ 782 event_add(&info[i].ev, ms_200); 783 event_add(&info[i].ev, ms_100); 784 } else { 785 event_add(&info[i].ev, ms_100); 786 } 787 } else { 788 event_add(&info[i].ev, ms_200); 789 } 790 } 791 792 event_base_assert_ok_(base); 793 evutil_gettimeofday(&start, NULL); 794 event_base_dispatch(base); 795 796 event_base_assert_ok_(base); 797 798 for (i=0; i<10; ++i) { 799 tt_int_op(info[i].count, ==, 4); 800 if (i % 2) { 801 test_timeval_diff_eq(&start, &info[i].called_at, 400); 802 } else { 803 test_timeval_diff_eq(&start, &info[i].called_at, 800); 804 } 805 } 806 807 /* Make sure we can free the base with some events in. 
*/ 808 for (i=0; i<100; ++i) { 809 if (i % 2) { 810 event_add(&info[i].ev, ms_100); 811 } else { 812 event_add(&info[i].ev, ms_200); 813 } 814 } 815 816 end: 817 event_base_free(data->base); /* need to do this here before info is 818 * out-of-scope */ 819 data->base = NULL; 820 } 821 822 #ifndef _WIN32 823 824 #define current_base event_global_current_base_ 825 extern struct event_base *current_base; 826 827 static void 828 fork_signal_cb(evutil_socket_t fd, short events, void *arg) 829 { 830 event_del(arg); 831 } 832 833 int child_pair[2] = { -1, -1 }; 834 static void 835 simple_child_read_cb(evutil_socket_t fd, short event, void *arg) 836 { 837 char buf[256]; 838 int len; 839 840 len = read(fd, buf, sizeof(buf)); 841 if (write(child_pair[0], "", 1) < 0) 842 tt_fail_perror("write"); 843 844 if (len) { 845 if (!called) { 846 if (event_add(arg, NULL) == -1) 847 exit(1); 848 } 849 } else if (called == 1) 850 test_ok = 1; 851 852 called++; 853 } 854 static void 855 test_fork(void) 856 { 857 char c; 858 int status; 859 struct event ev, sig_ev, usr_ev, existing_ev; 860 pid_t pid; 861 int wait_flags = 0; 862 863 #ifdef EVENT__HAVE_WAITPID_WITH_WNOWAIT 864 wait_flags |= WNOWAIT; 865 #endif 866 867 setup_test("After fork: "); 868 869 { 870 if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, child_pair) == -1) { 871 fprintf(stderr, "%s: socketpair\n", __func__); 872 exit(1); 873 } 874 875 if (evutil_make_socket_nonblocking(child_pair[0]) == -1) { 876 fprintf(stderr, "fcntl(O_NONBLOCK)"); 877 exit(1); 878 } 879 } 880 881 tt_assert(current_base); 882 evthread_make_base_notifiable(current_base); 883 884 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 885 tt_fail_perror("write"); 886 } 887 888 event_set(&ev, pair[1], EV_READ, simple_child_read_cb, &ev); 889 if (event_add(&ev, NULL) == -1) 890 exit(1); 891 892 evsignal_set(&sig_ev, SIGCHLD, fork_signal_cb, &sig_ev); 893 evsignal_add(&sig_ev, NULL); 894 895 evsignal_set(&existing_ev, SIGUSR2, fork_signal_cb, &existing_ev); 896 evsignal_add(&existing_ev, NULL); 897 898 event_base_assert_ok_(current_base); 899 TT_BLATHER(("Before fork")); 900 if ((pid = regress_fork()) == 0) { 901 /* in the child */ 902 TT_BLATHER(("In child, before reinit")); 903 event_base_assert_ok_(current_base); 904 if (event_reinit(current_base) == -1) { 905 fprintf(stdout, "FAILED (reinit)\n"); 906 exit(1); 907 } 908 TT_BLATHER(("After reinit")); 909 event_base_assert_ok_(current_base); 910 TT_BLATHER(("After assert-ok")); 911 912 evsignal_del(&sig_ev); 913 914 evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev); 915 evsignal_add(&usr_ev, NULL); 916 raise(SIGUSR1); 917 raise(SIGUSR2); 918 919 called = 0; 920 921 event_dispatch(); 922 923 event_base_free(current_base); 924 925 /* we do not send an EOF; simple_read_cb requires an EOF 926 * to set test_ok. we just verify that the callback was 927 * called. */ 928 exit(test_ok != 0 || called != 2 ? 
-2 : 76); 929 } 930 931 /** wait until client read first message */ 932 if (read(child_pair[1], &c, 1) < 0) { 933 tt_fail_perror("read"); 934 } 935 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 936 tt_fail_perror("write"); 937 } 938 939 TT_BLATHER(("Before waitpid")); 940 if (waitpid(pid, &status, wait_flags) == -1) { 941 perror("waitpid"); 942 exit(1); 943 } 944 TT_BLATHER(("After waitpid")); 945 946 if (WEXITSTATUS(status) != 76) { 947 fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status)); 948 exit(1); 949 } 950 951 /* test that the current event loop still works */ 952 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 953 fprintf(stderr, "%s: write\n", __func__); 954 } 955 956 shutdown(pair[0], EVUTIL_SHUT_WR); 957 958 evsignal_set(&usr_ev, SIGUSR1, fork_signal_cb, &usr_ev); 959 evsignal_add(&usr_ev, NULL); 960 raise(SIGUSR1); 961 raise(SIGUSR2); 962 963 event_dispatch(); 964 965 evsignal_del(&sig_ev); 966 tt_int_op(test_ok, ==, 1); 967 968 end: 969 cleanup_test(); 970 if (child_pair[0] != -1) 971 evutil_closesocket(child_pair[0]); 972 if (child_pair[1] != -1) 973 evutil_closesocket(child_pair[1]); 974 } 975 976 #ifdef EVENT__HAVE_PTHREADS 977 static void* del_wait_thread(void *arg) 978 { 979 struct timeval tv_start, tv_end; 980 981 evutil_gettimeofday(&tv_start, NULL); 982 event_dispatch(); 983 evutil_gettimeofday(&tv_end, NULL); 984 985 test_timeval_diff_eq(&tv_start, &tv_end, 300); 986 987 end: 988 return NULL; 989 } 990 991 static void 992 del_wait_cb(evutil_socket_t fd, short event, void *arg) 993 { 994 struct timeval delay = { 0, 300*1000 }; 995 TT_BLATHER(("Sleeping")); 996 evutil_usleep_(&delay); 997 test_ok = 1; 998 } 999 1000 static void 1001 test_del_wait(void) 1002 { 1003 struct event ev; 1004 pthread_t thread; 1005 1006 setup_test("event_del will wait: "); 1007 1008 event_set(&ev, pair[1], EV_READ, del_wait_cb, &ev); 1009 event_add(&ev, NULL); 1010 1011 pthread_create(&thread, NULL, del_wait_thread, NULL); 1012 1013 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 1014 tt_fail_perror("write"); 1015 } 1016 1017 { 1018 struct timeval delay = { 0, 30*1000 }; 1019 evutil_usleep_(&delay); 1020 } 1021 1022 { 1023 struct timeval tv_start, tv_end; 1024 evutil_gettimeofday(&tv_start, NULL); 1025 event_del(&ev); 1026 evutil_gettimeofday(&tv_end, NULL); 1027 test_timeval_diff_eq(&tv_start, &tv_end, 270); 1028 } 1029 1030 pthread_join(thread, NULL); 1031 1032 end: 1033 ; 1034 } 1035 #endif 1036 1037 static void 1038 signal_cb_sa(int sig) 1039 { 1040 test_ok = 2; 1041 } 1042 1043 static void 1044 signal_cb(evutil_socket_t fd, short event, void *arg) 1045 { 1046 struct event *ev = arg; 1047 1048 evsignal_del(ev); 1049 test_ok = 1; 1050 } 1051 1052 static void 1053 test_simplesignal_impl(int find_reorder) 1054 { 1055 struct event ev; 1056 struct itimerval itv; 1057 1058 evsignal_set(&ev, SIGALRM, signal_cb, &ev); 1059 evsignal_add(&ev, NULL); 1060 /* find bugs in which operations are re-ordered */ 1061 if (find_reorder) { 1062 evsignal_del(&ev); 1063 evsignal_add(&ev, NULL); 1064 } 1065 1066 memset(&itv, 0, sizeof(itv)); 1067 itv.it_value.tv_sec = 0; 1068 itv.it_value.tv_usec = 100000; 1069 if (setitimer(ITIMER_REAL, &itv, NULL) == -1) 1070 goto skip_simplesignal; 1071 1072 event_dispatch(); 1073 skip_simplesignal: 1074 if (evsignal_del(&ev) == -1) 1075 test_ok = 0; 1076 1077 cleanup_test(); 1078 } 1079 1080 static void 1081 test_simplestsignal(void) 1082 { 1083 setup_test("Simplest one signal: "); 1084 test_simplesignal_impl(0); 1085 } 1086 1087 static void 1088 
test_simplesignal(void) 1089 { 1090 setup_test("Simple signal: "); 1091 test_simplesignal_impl(1); 1092 } 1093 1094 static void 1095 test_multiplesignal(void) 1096 { 1097 struct event ev_one, ev_two; 1098 struct itimerval itv; 1099 1100 setup_test("Multiple signal: "); 1101 1102 evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one); 1103 evsignal_add(&ev_one, NULL); 1104 1105 evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two); 1106 evsignal_add(&ev_two, NULL); 1107 1108 memset(&itv, 0, sizeof(itv)); 1109 itv.it_value.tv_sec = 0; 1110 itv.it_value.tv_usec = 100000; 1111 if (setitimer(ITIMER_REAL, &itv, NULL) == -1) 1112 goto skip_simplesignal; 1113 1114 event_dispatch(); 1115 1116 skip_simplesignal: 1117 if (evsignal_del(&ev_one) == -1) 1118 test_ok = 0; 1119 if (evsignal_del(&ev_two) == -1) 1120 test_ok = 0; 1121 1122 cleanup_test(); 1123 } 1124 1125 static void 1126 test_immediatesignal(void) 1127 { 1128 struct event ev; 1129 1130 test_ok = 0; 1131 evsignal_set(&ev, SIGUSR1, signal_cb, &ev); 1132 evsignal_add(&ev, NULL); 1133 raise(SIGUSR1); 1134 event_loop(EVLOOP_NONBLOCK); 1135 evsignal_del(&ev); 1136 cleanup_test(); 1137 } 1138 1139 static void 1140 test_signal_dealloc(void) 1141 { 1142 /* make sure that evsignal_event is event_del'ed and pipe closed */ 1143 struct event ev; 1144 struct event_base *base = event_init(); 1145 evsignal_set(&ev, SIGUSR1, signal_cb, &ev); 1146 evsignal_add(&ev, NULL); 1147 evsignal_del(&ev); 1148 event_base_free(base); 1149 /* If we got here without asserting, we're fine. */ 1150 test_ok = 1; 1151 cleanup_test(); 1152 } 1153 1154 static void 1155 test_signal_pipeloss(void) 1156 { 1157 /* make sure that the base1 pipe is closed correctly. */ 1158 struct event_base *base1, *base2; 1159 int pipe1; 1160 test_ok = 0; 1161 base1 = event_init(); 1162 pipe1 = base1->sig.ev_signal_pair[0]; 1163 base2 = event_init(); 1164 event_base_free(base2); 1165 event_base_free(base1); 1166 if (close(pipe1) != -1 || errno!=EBADF) { 1167 /* fd must be closed, so second close gives -1, EBADF */ 1168 printf("signal pipe not closed. "); 1169 test_ok = 0; 1170 } else { 1171 test_ok = 1; 1172 } 1173 cleanup_test(); 1174 } 1175 1176 /* 1177 * make two bases to catch signals, use both of them. this only works 1178 * for event mechanisms that use our signal pipe trick. kqueue handles 1179 * signals internally, and all interested kqueues get all the signals. 
1180 */ 1181 static void 1182 test_signal_switchbase(void) 1183 { 1184 struct event ev1, ev2; 1185 struct event_base *base1, *base2; 1186 int is_kqueue; 1187 test_ok = 0; 1188 base1 = event_init(); 1189 base2 = event_init(); 1190 is_kqueue = !strcmp(event_get_method(),"kqueue"); 1191 evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1); 1192 evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2); 1193 if (event_base_set(base1, &ev1) || 1194 event_base_set(base2, &ev2) || 1195 event_add(&ev1, NULL) || 1196 event_add(&ev2, NULL)) { 1197 fprintf(stderr, "%s: cannot set base, add\n", __func__); 1198 exit(1); 1199 } 1200 1201 tt_ptr_op(event_get_base(&ev1), ==, base1); 1202 tt_ptr_op(event_get_base(&ev2), ==, base2); 1203 1204 test_ok = 0; 1205 /* can handle signal before loop is called */ 1206 raise(SIGUSR1); 1207 event_base_loop(base2, EVLOOP_NONBLOCK); 1208 if (is_kqueue) { 1209 if (!test_ok) 1210 goto end; 1211 test_ok = 0; 1212 } 1213 event_base_loop(base1, EVLOOP_NONBLOCK); 1214 if (test_ok && !is_kqueue) { 1215 test_ok = 0; 1216 1217 /* set base1 to handle signals */ 1218 event_base_loop(base1, EVLOOP_NONBLOCK); 1219 raise(SIGUSR1); 1220 event_base_loop(base1, EVLOOP_NONBLOCK); 1221 event_base_loop(base2, EVLOOP_NONBLOCK); 1222 } 1223 end: 1224 event_base_free(base1); 1225 event_base_free(base2); 1226 cleanup_test(); 1227 } 1228 1229 /* 1230 * assert that a signal event removed from the event queue really is 1231 * removed - with no possibility of it's parent handler being fired. 1232 */ 1233 static void 1234 test_signal_assert(void) 1235 { 1236 struct event ev; 1237 struct event_base *base = event_init(); 1238 test_ok = 0; 1239 /* use SIGCONT so we don't kill ourselves when we signal to nowhere */ 1240 evsignal_set(&ev, SIGCONT, signal_cb, &ev); 1241 evsignal_add(&ev, NULL); 1242 /* 1243 * if evsignal_del() fails to reset the handler, it's current handler 1244 * will still point to evsig_handler(). 1245 */ 1246 evsignal_del(&ev); 1247 1248 raise(SIGCONT); 1249 #if 0 1250 /* only way to verify we were in evsig_handler() */ 1251 /* XXXX Now there's no longer a good way. */ 1252 if (base->sig.evsig_caught) 1253 test_ok = 0; 1254 else 1255 test_ok = 1; 1256 #else 1257 test_ok = 1; 1258 #endif 1259 1260 event_base_free(base); 1261 cleanup_test(); 1262 return; 1263 } 1264 1265 /* 1266 * assert that we restore our previous signal handler properly. 
1267 */ 1268 static void 1269 test_signal_restore(void) 1270 { 1271 struct event ev; 1272 struct event_base *base = event_init(); 1273 #ifdef EVENT__HAVE_SIGACTION 1274 struct sigaction sa; 1275 #endif 1276 1277 test_ok = 0; 1278 #ifdef EVENT__HAVE_SIGACTION 1279 sa.sa_handler = signal_cb_sa; 1280 sa.sa_flags = 0x0; 1281 sigemptyset(&sa.sa_mask); 1282 if (sigaction(SIGUSR1, &sa, NULL) == -1) 1283 goto out; 1284 #else 1285 if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR) 1286 goto out; 1287 #endif 1288 evsignal_set(&ev, SIGUSR1, signal_cb, &ev); 1289 evsignal_add(&ev, NULL); 1290 evsignal_del(&ev); 1291 1292 raise(SIGUSR1); 1293 /* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */ 1294 if (test_ok != 2) 1295 test_ok = 0; 1296 out: 1297 event_base_free(base); 1298 cleanup_test(); 1299 return; 1300 } 1301 1302 static void 1303 signal_cb_swp(int sig, short event, void *arg) 1304 { 1305 called++; 1306 if (called < 5) 1307 raise(sig); 1308 else 1309 event_loopexit(NULL); 1310 } 1311 static void 1312 timeout_cb_swp(evutil_socket_t fd, short event, void *arg) 1313 { 1314 if (called == -1) { 1315 struct timeval tv = {5, 0}; 1316 1317 called = 0; 1318 evtimer_add((struct event *)arg, &tv); 1319 raise(SIGUSR1); 1320 return; 1321 } 1322 test_ok = 0; 1323 event_loopexit(NULL); 1324 } 1325 1326 static void 1327 test_signal_while_processing(void) 1328 { 1329 struct event_base *base = event_init(); 1330 struct event ev, ev_timer; 1331 struct timeval tv = {0, 0}; 1332 1333 setup_test("Receiving a signal while processing other signal: "); 1334 1335 called = -1; 1336 test_ok = 1; 1337 signal_set(&ev, SIGUSR1, signal_cb_swp, NULL); 1338 signal_add(&ev, NULL); 1339 evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer); 1340 evtimer_add(&ev_timer, &tv); 1341 event_dispatch(); 1342 1343 event_base_free(base); 1344 cleanup_test(); 1345 return; 1346 } 1347 #endif 1348 1349 static void 1350 test_free_active_base(void *ptr) 1351 { 1352 struct basic_test_data *data = ptr; 1353 struct event_base *base1; 1354 struct event ev1; 1355 1356 base1 = event_init(); 1357 if (base1) { 1358 event_assign(&ev1, base1, data->pair[1], EV_READ, 1359 dummy_read_cb, NULL); 1360 event_add(&ev1, NULL); 1361 event_base_free(base1); /* should not crash */ 1362 } else { 1363 tt_fail_msg("failed to create event_base for test"); 1364 } 1365 1366 base1 = event_init(); 1367 tt_assert(base1); 1368 event_assign(&ev1, base1, 0, 0, dummy_read_cb, NULL); 1369 event_active(&ev1, EV_READ, 1); 1370 event_base_free(base1); 1371 end: 1372 ; 1373 } 1374 1375 static void 1376 test_manipulate_active_events(void *ptr) 1377 { 1378 struct basic_test_data *data = ptr; 1379 struct event_base *base = data->base; 1380 struct event ev1; 1381 1382 event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL); 1383 1384 /* Make sure an active event is pending. */ 1385 event_active(&ev1, EV_READ, 1); 1386 tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL), 1387 ==, EV_READ); 1388 1389 /* Make sure that activating an event twice works. 
*/ 1390 event_active(&ev1, EV_WRITE, 1); 1391 tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL), 1392 ==, EV_READ|EV_WRITE); 1393 1394 end: 1395 event_del(&ev1); 1396 } 1397 1398 static void 1399 event_selfarg_cb(evutil_socket_t fd, short event, void *arg) 1400 { 1401 struct event *ev = arg; 1402 struct event_base *base = event_get_base(ev); 1403 event_base_assert_ok_(base); 1404 event_base_loopexit(base, NULL); 1405 tt_want(ev == event_base_get_running_event(base)); 1406 } 1407 1408 static void 1409 test_event_new_selfarg(void *ptr) 1410 { 1411 struct basic_test_data *data = ptr; 1412 struct event_base *base = data->base; 1413 struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb, 1414 event_self_cbarg()); 1415 1416 event_active(ev, EV_READ, 1); 1417 event_base_dispatch(base); 1418 1419 event_free(ev); 1420 } 1421 1422 static void 1423 test_event_assign_selfarg(void *ptr) 1424 { 1425 struct basic_test_data *data = ptr; 1426 struct event_base *base = data->base; 1427 struct event ev; 1428 1429 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, 1430 event_self_cbarg()); 1431 event_active(&ev, EV_READ, 1); 1432 event_base_dispatch(base); 1433 } 1434 1435 static void 1436 test_event_base_get_num_events(void *ptr) 1437 { 1438 struct basic_test_data *data = ptr; 1439 struct event_base *base = data->base; 1440 struct event ev; 1441 int event_count_active; 1442 int event_count_virtual; 1443 int event_count_added; 1444 int event_count_active_virtual; 1445 int event_count_active_added; 1446 int event_count_virtual_added; 1447 int event_count_active_added_virtual; 1448 1449 struct timeval qsec = {0, 100000}; 1450 1451 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, 1452 event_self_cbarg()); 1453 1454 event_add(&ev, &qsec); 1455 event_count_active = event_base_get_num_events(base, 1456 EVENT_BASE_COUNT_ACTIVE); 1457 event_count_virtual = event_base_get_num_events(base, 1458 EVENT_BASE_COUNT_VIRTUAL); 1459 event_count_added = event_base_get_num_events(base, 1460 EVENT_BASE_COUNT_ADDED); 1461 event_count_active_virtual = event_base_get_num_events(base, 1462 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); 1463 event_count_active_added = event_base_get_num_events(base, 1464 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); 1465 event_count_virtual_added = event_base_get_num_events(base, 1466 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); 1467 event_count_active_added_virtual = event_base_get_num_events(base, 1468 EVENT_BASE_COUNT_ACTIVE| 1469 EVENT_BASE_COUNT_ADDED| 1470 EVENT_BASE_COUNT_VIRTUAL); 1471 tt_int_op(event_count_active, ==, 0); 1472 tt_int_op(event_count_virtual, ==, 0); 1473 /* libevent itself adds a timeout event, so the event_count is 2 here */ 1474 tt_int_op(event_count_added, ==, 2); 1475 tt_int_op(event_count_active_virtual, ==, 0); 1476 tt_int_op(event_count_active_added, ==, 2); 1477 tt_int_op(event_count_virtual_added, ==, 2); 1478 tt_int_op(event_count_active_added_virtual, ==, 2); 1479 1480 event_active(&ev, EV_READ, 1); 1481 event_count_active = event_base_get_num_events(base, 1482 EVENT_BASE_COUNT_ACTIVE); 1483 event_count_virtual = event_base_get_num_events(base, 1484 EVENT_BASE_COUNT_VIRTUAL); 1485 event_count_added = event_base_get_num_events(base, 1486 EVENT_BASE_COUNT_ADDED); 1487 event_count_active_virtual = event_base_get_num_events(base, 1488 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); 1489 event_count_active_added = event_base_get_num_events(base, 1490 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); 1491 
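	/* Note (added commentary): event_base_get_num_events() returns the
	 * sum of the requested counters when several EVENT_BASE_COUNT_*
	 * flags are OR'd together, which is why the combined queries here
	 * are expected to equal the sums of the individual active/virtual/
	 * added counts in the assertions below. */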
event_count_virtual_added = event_base_get_num_events(base, 1492 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); 1493 event_count_active_added_virtual = event_base_get_num_events(base, 1494 EVENT_BASE_COUNT_ACTIVE| 1495 EVENT_BASE_COUNT_ADDED| 1496 EVENT_BASE_COUNT_VIRTUAL); 1497 tt_int_op(event_count_active, ==, 1); 1498 tt_int_op(event_count_virtual, ==, 0); 1499 tt_int_op(event_count_added, ==, 3); 1500 tt_int_op(event_count_active_virtual, ==, 1); 1501 tt_int_op(event_count_active_added, ==, 4); 1502 tt_int_op(event_count_virtual_added, ==, 3); 1503 tt_int_op(event_count_active_added_virtual, ==, 4); 1504 1505 event_base_loop(base, 0); 1506 event_count_active = event_base_get_num_events(base, 1507 EVENT_BASE_COUNT_ACTIVE); 1508 event_count_virtual = event_base_get_num_events(base, 1509 EVENT_BASE_COUNT_VIRTUAL); 1510 event_count_added = event_base_get_num_events(base, 1511 EVENT_BASE_COUNT_ADDED); 1512 event_count_active_virtual = event_base_get_num_events(base, 1513 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); 1514 event_count_active_added = event_base_get_num_events(base, 1515 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); 1516 event_count_virtual_added = event_base_get_num_events(base, 1517 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); 1518 event_count_active_added_virtual = event_base_get_num_events(base, 1519 EVENT_BASE_COUNT_ACTIVE| 1520 EVENT_BASE_COUNT_ADDED| 1521 EVENT_BASE_COUNT_VIRTUAL); 1522 tt_int_op(event_count_active, ==, 0); 1523 tt_int_op(event_count_virtual, ==, 0); 1524 tt_int_op(event_count_added, ==, 0); 1525 tt_int_op(event_count_active_virtual, ==, 0); 1526 tt_int_op(event_count_active_added, ==, 0); 1527 tt_int_op(event_count_virtual_added, ==, 0); 1528 tt_int_op(event_count_active_added_virtual, ==, 0); 1529 1530 event_base_add_virtual_(base); 1531 event_count_active = event_base_get_num_events(base, 1532 EVENT_BASE_COUNT_ACTIVE); 1533 event_count_virtual = event_base_get_num_events(base, 1534 EVENT_BASE_COUNT_VIRTUAL); 1535 event_count_added = event_base_get_num_events(base, 1536 EVENT_BASE_COUNT_ADDED); 1537 event_count_active_virtual = event_base_get_num_events(base, 1538 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL); 1539 event_count_active_added = event_base_get_num_events(base, 1540 EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED); 1541 event_count_virtual_added = event_base_get_num_events(base, 1542 EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED); 1543 event_count_active_added_virtual = event_base_get_num_events(base, 1544 EVENT_BASE_COUNT_ACTIVE| 1545 EVENT_BASE_COUNT_ADDED| 1546 EVENT_BASE_COUNT_VIRTUAL); 1547 tt_int_op(event_count_active, ==, 0); 1548 tt_int_op(event_count_virtual, ==, 1); 1549 tt_int_op(event_count_added, ==, 0); 1550 tt_int_op(event_count_active_virtual, ==, 1); 1551 tt_int_op(event_count_active_added, ==, 0); 1552 tt_int_op(event_count_virtual_added, ==, 1); 1553 tt_int_op(event_count_active_added_virtual, ==, 1); 1554 1555 end: 1556 ; 1557 } 1558 1559 static void 1560 test_event_base_get_max_events(void *ptr) 1561 { 1562 struct basic_test_data *data = ptr; 1563 struct event_base *base = data->base; 1564 struct event ev; 1565 struct event ev2; 1566 int event_count_active; 1567 int event_count_virtual; 1568 int event_count_added; 1569 int event_count_active_virtual; 1570 int event_count_active_added; 1571 int event_count_virtual_added; 1572 int event_count_active_added_virtual; 1573 1574 struct timeval qsec = {0, 100000}; 1575 1576 event_assign(&ev, base, -1, EV_READ, event_selfarg_cb, 1577 event_self_cbarg()); 
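	/* Note (added commentary): the third argument to
	 * event_base_get_max_events() is a "clear" flag; passing 1 resets
	 * the recorded maximum once it has been read, which the later
	 * queries in this test rely on when they expect 0. */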
1578 event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb, 1579 event_self_cbarg()); 1580 1581 event_add(&ev, &qsec); 1582 event_add(&ev2, &qsec); 1583 event_del(&ev2); 1584 1585 event_count_active = event_base_get_max_events(base, 1586 EVENT_BASE_COUNT_ACTIVE, 0); 1587 event_count_virtual = event_base_get_max_events(base, 1588 EVENT_BASE_COUNT_VIRTUAL, 0); 1589 event_count_added = event_base_get_max_events(base, 1590 EVENT_BASE_COUNT_ADDED, 0); 1591 event_count_active_virtual = event_base_get_max_events(base, 1592 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); 1593 event_count_active_added = event_base_get_max_events(base, 1594 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); 1595 event_count_virtual_added = event_base_get_max_events(base, 1596 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); 1597 event_count_active_added_virtual = event_base_get_max_events(base, 1598 EVENT_BASE_COUNT_ACTIVE | 1599 EVENT_BASE_COUNT_ADDED | 1600 EVENT_BASE_COUNT_VIRTUAL, 0); 1601 1602 tt_int_op(event_count_active, ==, 0); 1603 tt_int_op(event_count_virtual, ==, 0); 1604 /* libevent itself adds a timeout event, so the event_count is 4 here */ 1605 tt_int_op(event_count_added, ==, 4); 1606 tt_int_op(event_count_active_virtual, ==, 0); 1607 tt_int_op(event_count_active_added, ==, 4); 1608 tt_int_op(event_count_virtual_added, ==, 4); 1609 tt_int_op(event_count_active_added_virtual, ==, 4); 1610 1611 event_active(&ev, EV_READ, 1); 1612 event_count_active = event_base_get_max_events(base, 1613 EVENT_BASE_COUNT_ACTIVE, 0); 1614 event_count_virtual = event_base_get_max_events(base, 1615 EVENT_BASE_COUNT_VIRTUAL, 0); 1616 event_count_added = event_base_get_max_events(base, 1617 EVENT_BASE_COUNT_ADDED, 0); 1618 event_count_active_virtual = event_base_get_max_events(base, 1619 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); 1620 event_count_active_added = event_base_get_max_events(base, 1621 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); 1622 event_count_virtual_added = event_base_get_max_events(base, 1623 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); 1624 event_count_active_added_virtual = event_base_get_max_events(base, 1625 EVENT_BASE_COUNT_ACTIVE | 1626 EVENT_BASE_COUNT_ADDED | 1627 EVENT_BASE_COUNT_VIRTUAL, 0); 1628 1629 tt_int_op(event_count_active, ==, 1); 1630 tt_int_op(event_count_virtual, ==, 0); 1631 tt_int_op(event_count_added, ==, 4); 1632 tt_int_op(event_count_active_virtual, ==, 1); 1633 tt_int_op(event_count_active_added, ==, 5); 1634 tt_int_op(event_count_virtual_added, ==, 4); 1635 tt_int_op(event_count_active_added_virtual, ==, 5); 1636 1637 event_base_loop(base, 0); 1638 event_count_active = event_base_get_max_events(base, 1639 EVENT_BASE_COUNT_ACTIVE, 1); 1640 event_count_virtual = event_base_get_max_events(base, 1641 EVENT_BASE_COUNT_VIRTUAL, 1); 1642 event_count_added = event_base_get_max_events(base, 1643 EVENT_BASE_COUNT_ADDED, 1); 1644 event_count_active_virtual = event_base_get_max_events(base, 1645 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); 1646 event_count_active_added = event_base_get_max_events(base, 1647 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); 1648 event_count_virtual_added = event_base_get_max_events(base, 1649 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); 1650 event_count_active_added_virtual = event_base_get_max_events(base, 1651 EVENT_BASE_COUNT_ACTIVE | 1652 EVENT_BASE_COUNT_ADDED | 1653 EVENT_BASE_COUNT_VIRTUAL, 1); 1654 1655 tt_int_op(event_count_active, ==, 1); 1656 tt_int_op(event_count_virtual, 
==, 0); 1657 tt_int_op(event_count_added, ==, 4); 1658 tt_int_op(event_count_active_virtual, ==, 0); 1659 tt_int_op(event_count_active_added, ==, 0); 1660 tt_int_op(event_count_virtual_added, ==, 0); 1661 tt_int_op(event_count_active_added_virtual, ==, 0); 1662 1663 event_count_active = event_base_get_max_events(base, 1664 EVENT_BASE_COUNT_ACTIVE, 0); 1665 event_count_virtual = event_base_get_max_events(base, 1666 EVENT_BASE_COUNT_VIRTUAL, 0); 1667 event_count_added = event_base_get_max_events(base, 1668 EVENT_BASE_COUNT_ADDED, 0); 1669 tt_int_op(event_count_active, ==, 0); 1670 tt_int_op(event_count_virtual, ==, 0); 1671 tt_int_op(event_count_added, ==, 0); 1672 1673 event_base_add_virtual_(base); 1674 event_count_active = event_base_get_max_events(base, 1675 EVENT_BASE_COUNT_ACTIVE, 0); 1676 event_count_virtual = event_base_get_max_events(base, 1677 EVENT_BASE_COUNT_VIRTUAL, 0); 1678 event_count_added = event_base_get_max_events(base, 1679 EVENT_BASE_COUNT_ADDED, 0); 1680 event_count_active_virtual = event_base_get_max_events(base, 1681 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0); 1682 event_count_active_added = event_base_get_max_events(base, 1683 EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0); 1684 event_count_virtual_added = event_base_get_max_events(base, 1685 EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0); 1686 event_count_active_added_virtual = event_base_get_max_events(base, 1687 EVENT_BASE_COUNT_ACTIVE | 1688 EVENT_BASE_COUNT_ADDED | 1689 EVENT_BASE_COUNT_VIRTUAL, 0); 1690 1691 tt_int_op(event_count_active, ==, 0); 1692 tt_int_op(event_count_virtual, ==, 1); 1693 tt_int_op(event_count_added, ==, 0); 1694 tt_int_op(event_count_active_virtual, ==, 1); 1695 tt_int_op(event_count_active_added, ==, 0); 1696 tt_int_op(event_count_virtual_added, ==, 1); 1697 tt_int_op(event_count_active_added_virtual, ==, 1); 1698 1699 end: 1700 ; 1701 } 1702 1703 static void 1704 test_bad_assign(void *ptr) 1705 { 1706 struct event ev; 1707 int r; 1708 /* READ|SIGNAL is not allowed */ 1709 r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL); 1710 tt_int_op(r,==,-1); 1711 1712 end: 1713 ; 1714 } 1715 1716 static int reentrant_cb_run = 0; 1717 1718 static void 1719 bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr) 1720 { 1721 struct event_base *base = ptr; 1722 int r; 1723 reentrant_cb_run = 1; 1724 /* This reentrant call to event_base_loop should be detected and 1725 * should fail */ 1726 r = event_base_loop(base, 0); 1727 tt_int_op(r, ==, -1); 1728 end: 1729 ; 1730 } 1731 1732 static void 1733 test_bad_reentrant(void *ptr) 1734 { 1735 struct basic_test_data *data = ptr; 1736 struct event_base *base = data->base; 1737 struct event ev; 1738 int r; 1739 event_assign(&ev, base, -1, 1740 0, bad_reentrant_run_loop_cb, base); 1741 1742 event_active(&ev, EV_WRITE, 1); 1743 r = event_base_loop(base, 0); 1744 tt_int_op(r, ==, 1); 1745 tt_int_op(reentrant_cb_run, ==, 1); 1746 end: 1747 ; 1748 } 1749 1750 static int n_write_a_byte_cb=0; 1751 static int n_read_and_drain_cb=0; 1752 static int n_activate_other_event_cb=0; 1753 static void 1754 write_a_byte_cb(evutil_socket_t fd, short what, void *arg) 1755 { 1756 char buf[] = "x"; 1757 if (write(fd, buf, 1) == 1) 1758 ++n_write_a_byte_cb; 1759 } 1760 static void 1761 read_and_drain_cb(evutil_socket_t fd, short what, void *arg) 1762 { 1763 char buf[128]; 1764 int n; 1765 ++n_read_and_drain_cb; 1766 while ((n = read(fd, buf, sizeof(buf))) > 0) 1767 ; 1768 } 1769 1770 static void 1771 
activate_other_event_cb(evutil_socket_t fd, short what, void *other_) 1772 { 1773 struct event *ev_activate = other_; 1774 ++n_activate_other_event_cb; 1775 event_active_later_(ev_activate, EV_READ); 1776 } 1777 1778 static void 1779 test_active_later(void *ptr) 1780 { 1781 struct basic_test_data *data = ptr; 1782 struct event *ev1 = NULL, *ev2 = NULL; 1783 struct event ev3, ev4; 1784 struct timeval qsec = {0, 100000}; 1785 ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL); 1786 ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL); 1787 event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4); 1788 event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3); 1789 event_add(ev1, NULL); 1790 event_add(ev2, NULL); 1791 event_active_later_(&ev3, EV_READ); 1792 1793 event_base_loopexit(data->base, &qsec); 1794 1795 event_base_loop(data->base, 0); 1796 1797 TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.", 1798 n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb)); 1799 event_del(&ev3); 1800 event_del(&ev4); 1801 1802 tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb); 1803 tt_int_op(n_write_a_byte_cb, >, 100); 1804 tt_int_op(n_read_and_drain_cb, >, 100); 1805 tt_int_op(n_activate_other_event_cb, >, 100); 1806 1807 event_active_later_(&ev4, EV_READ); 1808 event_active(&ev4, EV_READ, 1); /* This should make the event 1809 active immediately. */ 1810 tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0); 1811 tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0); 1812 1813 /* Now leave this one around, so that event_free sees it and removes 1814 * it. */ 1815 event_active_later_(&ev3, EV_READ); 1816 event_base_assert_ok_(data->base); 1817 1818 end: 1819 if (ev1) 1820 event_free(ev1); 1821 if (ev2) 1822 event_free(ev2); 1823 1824 event_base_free(data->base); 1825 data->base = NULL; 1826 } 1827 1828 1829 static void incr_arg_cb(evutil_socket_t fd, short what, void *arg) 1830 { 1831 int *intptr = arg; 1832 (void) fd; (void) what; 1833 ++*intptr; 1834 } 1835 static void remove_timers_cb(evutil_socket_t fd, short what, void *arg) 1836 { 1837 struct event **ep = arg; 1838 (void) fd; (void) what; 1839 event_remove_timer(ep[0]); 1840 event_remove_timer(ep[1]); 1841 } 1842 static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg) 1843 { 1844 evutil_socket_t *sockp = arg; 1845 (void) fd; (void) what; 1846 (void) write(*sockp, "A", 1); 1847 } 1848 struct read_not_timeout_param 1849 { 1850 struct event **ev; 1851 int events; 1852 int count; 1853 }; 1854 static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg) 1855 { 1856 struct read_not_timeout_param *rntp = arg; 1857 char c; 1858 ev_ssize_t n; 1859 (void) fd; (void) what; 1860 n = read(fd, &c, 1); 1861 tt_int_op(n, ==, 1); 1862 rntp->events |= what; 1863 ++rntp->count; 1864 if(2 == rntp->count) event_del(rntp->ev[0]); 1865 end: 1866 ; 1867 } 1868 1869 static void 1870 test_event_remove_timeout(void *ptr) 1871 { 1872 struct basic_test_data *data = ptr; 1873 struct event_base *base = data->base; 1874 struct event *ev[5]; 1875 int ev1_fired=0; 1876 struct timeval ms25 = { 0, 25*1000 }, 1877 ms40 = { 0, 40*1000 }, 1878 ms75 = { 0, 75*1000 }, 1879 ms125 = { 0, 125*1000 }; 1880 struct read_not_timeout_param rntp = { ev, 0, 0 }; 1881 1882 event_base_assert_ok_(base); 1883 1884 ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST, 1885 read_not_timeout_cb, &rntp); 1886 ev[1] = evtimer_new(base, 
incr_arg_cb, &ev1_fired); 1887 ev[2] = evtimer_new(base, remove_timers_cb, ev); 1888 ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]); 1889 ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]); 1890 tt_assert(base); 1891 event_add(ev[2], &ms25); /* remove timers */ 1892 event_add(ev[4], &ms40); /* write to test if timer re-activates */ 1893 event_add(ev[0], &ms75); /* read */ 1894 event_add(ev[1], &ms75); /* timer */ 1895 event_add(ev[3], &ms125); /* timeout. */ 1896 event_base_assert_ok_(base); 1897 1898 event_base_dispatch(base); 1899 1900 tt_int_op(ev1_fired, ==, 0); 1901 tt_int_op(rntp.events, ==, EV_READ); 1902 1903 event_base_assert_ok_(base); 1904 end: 1905 event_free(ev[0]); 1906 event_free(ev[1]); 1907 event_free(ev[2]); 1908 event_free(ev[3]); 1909 event_free(ev[4]); 1910 } 1911 1912 static void 1913 test_event_base_new(void *ptr) 1914 { 1915 struct basic_test_data *data = ptr; 1916 struct event_base *base = 0; 1917 struct event ev1; 1918 struct basic_cb_args args; 1919 1920 int towrite = (int)strlen(TEST1)+1; 1921 int len = write(data->pair[0], TEST1, towrite); 1922 1923 if (len < 0) 1924 tt_abort_perror("initial write"); 1925 else if (len != towrite) 1926 tt_abort_printf(("initial write fell short (%d of %d bytes)", 1927 len, towrite)); 1928 1929 if (shutdown(data->pair[0], EVUTIL_SHUT_WR)) 1930 tt_abort_perror("initial write shutdown"); 1931 1932 base = event_base_new(); 1933 if (!base) 1934 tt_abort_msg("failed to create event base"); 1935 1936 args.eb = base; 1937 args.ev = &ev1; 1938 args.callcount = 0; 1939 event_assign(&ev1, base, data->pair[1], 1940 EV_READ|EV_PERSIST, basic_read_cb, &args); 1941 1942 if (event_add(&ev1, NULL)) 1943 tt_abort_perror("initial event_add"); 1944 1945 if (event_base_loop(base, 0)) 1946 tt_abort_msg("unsuccessful exit from event loop"); 1947 1948 end: 1949 if (base) 1950 event_base_free(base); 1951 } 1952 1953 static void 1954 test_loopexit(void) 1955 { 1956 struct timeval tv, tv_start, tv_end; 1957 struct event ev; 1958 1959 setup_test("Loop exit: "); 1960 1961 tv.tv_usec = 0; 1962 tv.tv_sec = 60*60*24; 1963 evtimer_set(&ev, timeout_cb, NULL); 1964 evtimer_add(&ev, &tv); 1965 1966 tv.tv_usec = 300*1000; 1967 tv.tv_sec = 0; 1968 event_loopexit(&tv); 1969 1970 evutil_gettimeofday(&tv_start, NULL); 1971 event_dispatch(); 1972 evutil_gettimeofday(&tv_end, NULL); 1973 1974 evtimer_del(&ev); 1975 1976 tt_assert(event_base_got_exit(global_base)); 1977 tt_assert(!event_base_got_break(global_base)); 1978 1979 test_timeval_diff_eq(&tv_start, &tv_end, 300); 1980 1981 test_ok = 1; 1982 end: 1983 cleanup_test(); 1984 } 1985 1986 static void 1987 test_loopexit_multiple(void) 1988 { 1989 struct timeval tv, tv_start, tv_end; 1990 struct event_base *base; 1991 1992 setup_test("Loop Multiple exit: "); 1993 1994 base = event_base_new(); 1995 1996 tv.tv_usec = 200*1000; 1997 tv.tv_sec = 0; 1998 event_base_loopexit(base, &tv); 1999 2000 tv.tv_usec = 0; 2001 tv.tv_sec = 3; 2002 event_base_loopexit(base, &tv); 2003 2004 evutil_gettimeofday(&tv_start, NULL); 2005 event_base_dispatch(base); 2006 evutil_gettimeofday(&tv_end, NULL); 2007 2008 tt_assert(event_base_got_exit(base)); 2009 tt_assert(!event_base_got_break(base)); 2010 2011 event_base_free(base); 2012 2013 test_timeval_diff_eq(&tv_start, &tv_end, 200); 2014 2015 test_ok = 1; 2016 2017 end: 2018 cleanup_test(); 2019 } 2020 2021 static void 2022 break_cb(evutil_socket_t fd, short events, void *arg) 2023 { 2024 test_ok = 1; 2025 event_loopbreak(); 2026 } 2027 2028 static void 2029 
fail_cb(evutil_socket_t fd, short events, void *arg) 2030 { 2031 test_ok = 0; 2032 } 2033 2034 static void 2035 test_loopbreak(void) 2036 { 2037 struct event ev1, ev2; 2038 struct timeval tv; 2039 2040 setup_test("Loop break: "); 2041 2042 tv.tv_sec = 0; 2043 tv.tv_usec = 0; 2044 evtimer_set(&ev1, break_cb, NULL); 2045 evtimer_add(&ev1, &tv); 2046 evtimer_set(&ev2, fail_cb, NULL); 2047 evtimer_add(&ev2, &tv); 2048 2049 event_dispatch(); 2050 2051 tt_assert(!event_base_got_exit(global_base)); 2052 tt_assert(event_base_got_break(global_base)); 2053 2054 evtimer_del(&ev1); 2055 evtimer_del(&ev2); 2056 2057 end: 2058 cleanup_test(); 2059 } 2060 2061 static struct event *readd_test_event_last_added = NULL; 2062 static void 2063 re_add_read_cb(evutil_socket_t fd, short event, void *arg) 2064 { 2065 char buf[256]; 2066 struct event *ev_other = arg; 2067 ev_ssize_t n_read; 2068 2069 readd_test_event_last_added = ev_other; 2070 2071 n_read = read(fd, buf, sizeof(buf)); 2072 2073 if (n_read < 0) { 2074 tt_fail_perror("read"); 2075 event_base_loopbreak(event_get_base(ev_other)); 2076 return; 2077 } else { 2078 event_add(ev_other, NULL); 2079 ++test_ok; 2080 } 2081 } 2082 2083 static void 2084 test_nonpersist_readd(void) 2085 { 2086 struct event ev1, ev2; 2087 2088 setup_test("Re-add nonpersistent events: "); 2089 event_set(&ev1, pair[0], EV_READ, re_add_read_cb, &ev2); 2090 event_set(&ev2, pair[1], EV_READ, re_add_read_cb, &ev1); 2091 2092 if (write(pair[0], "Hello", 5) < 0) { 2093 tt_fail_perror("write(pair[0])"); 2094 } 2095 2096 if (write(pair[1], "Hello", 5) < 0) { 2097 tt_fail_perror("write(pair[1])\n"); 2098 } 2099 2100 if (event_add(&ev1, NULL) == -1 || 2101 event_add(&ev2, NULL) == -1) { 2102 test_ok = 0; 2103 } 2104 if (test_ok != 0) 2105 exit(1); 2106 event_loop(EVLOOP_ONCE); 2107 if (test_ok != 2) 2108 exit(1); 2109 /* At this point, we executed both callbacks. Whichever one got 2110 * called first added the second, but the second then immediately got 2111 * deleted before its callback was called. At this point, though, it 2112 * re-added the first. 
	 */
	if (!readd_test_event_last_added) {
		test_ok = 0;
	} else if (readd_test_event_last_added == &ev1) {
		if (!event_pending(&ev1, EV_READ, NULL) ||
		    event_pending(&ev2, EV_READ, NULL))
			test_ok = 0;
	} else {
		if (event_pending(&ev1, EV_READ, NULL) ||
		    !event_pending(&ev2, EV_READ, NULL))
			test_ok = 0;
	}

	event_del(&ev1);
	event_del(&ev2);

	cleanup_test();
}

struct test_pri_event {
	struct event ev;
	int count;
};

static void
test_priorities_cb(evutil_socket_t fd, short what, void *arg)
{
	struct test_pri_event *pri = arg;
	struct timeval tv;

	if (pri->count == 3) {
		event_loopexit(NULL);
		return;
	}

	pri->count++;

	evutil_timerclear(&tv);
	event_add(&pri->ev, &tv);
}

static void
test_priorities_impl(int npriorities)
{
	struct test_pri_event one, two;
	struct timeval tv;

	TT_BLATHER(("Testing Priorities %d: ", npriorities));

	event_base_priority_init(global_base, npriorities);

	memset(&one, 0, sizeof(one));
	memset(&two, 0, sizeof(two));

	timeout_set(&one.ev, test_priorities_cb, &one);
	if (event_priority_set(&one.ev, 0) == -1) {
		fprintf(stderr, "%s: failed to set priority", __func__);
		exit(1);
	}

	timeout_set(&two.ev, test_priorities_cb, &two);
	if (event_priority_set(&two.ev, npriorities - 1) == -1) {
		fprintf(stderr, "%s: failed to set priority", __func__);
		exit(1);
	}

	evutil_timerclear(&tv);

	if (event_add(&one.ev, &tv) == -1)
		exit(1);
	if (event_add(&two.ev, &tv) == -1)
		exit(1);

	event_dispatch();

	event_del(&one.ev);
	event_del(&two.ev);

	if (npriorities == 1) {
		if (one.count == 3 && two.count == 3)
			test_ok = 1;
	} else if (npriorities == 2) {
		/* Two is called once because event_loopexit is priority 1 */
		if (one.count == 3 && two.count == 1)
			test_ok = 1;
	} else {
		if (one.count == 3 && two.count == 0)
			test_ok = 1;
	}
}

static void
test_priorities(void)
{
	test_priorities_impl(1);
	if (test_ok)
		test_priorities_impl(2);
	if (test_ok)
		test_priorities_impl(3);
}

/* priority-active-inversion: activate a higher-priority event, and make sure
 * it keeps us from running a lower-priority event first. */
static int n_pai_calls = 0;
static struct event pai_events[3];

static void
prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg)
{
	int *call_order = arg;
	*call_order = n_pai_calls++;
	if (n_pai_calls == 1) {
		/* This should activate later, even though it shares a
		   priority with us. */
		event_active(&pai_events[1], EV_READ, 1);
		/* This should activate next, since its priority is higher,
		   even though we activated it second.
		 */
		event_active(&pai_events[2], EV_TIMEOUT, 1);
	}
}

static void
test_priority_active_inversion(void *data_)
{
	struct basic_test_data *data = data_;
	struct event_base *base = data->base;
	int call_order[3];
	int i;
	tt_int_op(event_base_priority_init(base, 8), ==, 0);

	n_pai_calls = 0;
	memset(call_order, 0, sizeof(call_order));

	for (i=0;i<3;++i) {
		event_assign(&pai_events[i], data->base, -1, 0,
		    prio_active_inversion_cb, &call_order[i]);
	}

	event_priority_set(&pai_events[0], 4);
	event_priority_set(&pai_events[1], 4);
	event_priority_set(&pai_events[2], 0);

	event_active(&pai_events[0], EV_WRITE, 1);

	event_base_dispatch(base);
	tt_int_op(n_pai_calls, ==, 3);
	tt_int_op(call_order[0], ==, 0);
	tt_int_op(call_order[1], ==, 2);
	tt_int_op(call_order[2], ==, 1);
end:
	;
}


static void
test_multiple_cb(evutil_socket_t fd, short event, void *arg)
{
	if (event & EV_READ)
		test_ok |= 1;
	else if (event & EV_WRITE)
		test_ok |= 2;
}

static void
test_multiple_events_for_same_fd(void)
{
	struct event e1, e2;

	setup_test("Multiple events for same fd: ");

	event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL);
	event_add(&e1, NULL);
	event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL);
	event_add(&e2, NULL);
	event_loop(EVLOOP_ONCE);
	event_del(&e2);

	if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	event_loop(EVLOOP_ONCE);
	event_del(&e1);

	if (test_ok != 3)
		test_ok = 0;

	cleanup_test();
}

int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number);
int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf);

static void
read_once_cb(evutil_socket_t fd, short event, void *arg)
{
	char buf[256];
	int len;

	len = read(fd, buf, sizeof(buf));

	if (called) {
		test_ok = 0;
	} else if (len) {
		/* Assumes global pair[0] can be used for writing */
		if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
			tt_fail_perror("write");
			test_ok = 0;
		} else {
			test_ok = 1;
		}
	}

	called++;
}

static void
test_want_only_once(void)
{
	struct event ev;
	struct timeval tv;

	/* Very simple read test */
	setup_test("Want read only once: ");

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	/* Setup the loop termination */
	evutil_timerclear(&tv);
	tv.tv_usec = 300*1000;
	event_loopexit(&tv);

	event_set(&ev, pair[1], EV_READ, read_once_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_dispatch();

	cleanup_test();
}

#define TEST_MAX_INT 6

static void
evtag_int_test(void *ptr)
{
	struct evbuffer *tmp = evbuffer_new();
	ev_uint32_t integers[TEST_MAX_INT] = {
		0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
	};
	ev_uint32_t integer;
	ev_uint64_t big_int;
	int i;

	evtag_init();

	for (i = 0; i < TEST_MAX_INT; i++) {
		int oldlen, newlen;
		oldlen = (int)EVBUFFER_LENGTH(tmp);
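		/* Encode each value twice: once as a 32-bit integer and once
		 * as a 64-bit integer scaled up by a factor of one billion,
		 * so that the decode loop below exercises both paths. */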
		evtag_encode_int(tmp, integers[i]);
		newlen = (int)EVBUFFER_LENGTH(tmp);
		TT_BLATHER(("encoded 0x%08x with %d bytes",
		    (unsigned)integers[i], newlen - oldlen));
		big_int = integers[i];
		big_int *= 1000000000; /* 1 billion */
		evtag_encode_int64(tmp, big_int);
	}

	for (i = 0; i < TEST_MAX_INT; i++) {
		tt_int_op(evtag_decode_int(&integer, tmp), !=, -1);
		tt_uint_op(integer, ==, integers[i]);
		tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1);
		tt_assert((big_int / 1000000000) == integers[i]);
	}

	tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
end:
	evbuffer_free(tmp);
}

static void
evtag_fuzz(void *ptr)
{
	unsigned char buffer[4096];
	struct evbuffer *tmp = evbuffer_new();
	struct timeval tv;
	int i, j;

	int not_failed = 0;

	evtag_init();

	for (j = 0; j < 100; j++) {
		for (i = 0; i < (int)sizeof(buffer); i++)
			buffer[i] = test_weakrand();
		evbuffer_drain(tmp, -1);
		evbuffer_add(tmp, buffer, sizeof(buffer));

		if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1)
			not_failed++;
	}

	/* The majority of decodes should fail */
	tt_int_op(not_failed, <, 10);

	/* Now insert some corruption into the tag length field */
	evbuffer_drain(tmp, -1);
	evutil_timerclear(&tv);
	tv.tv_sec = 1;
	evtag_marshal_timeval(tmp, 0, &tv);
	evbuffer_add(tmp, buffer, sizeof(buffer));

	((char *)EVBUFFER_DATA(tmp))[1] = '\xff';
	if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
		tt_abort_msg("evtag_unmarshal_timeval should have failed");
	}

end:
	evbuffer_free(tmp);
}

static void
evtag_tag_encoding(void *ptr)
{
	struct evbuffer *tmp = evbuffer_new();
	ev_uint32_t integers[TEST_MAX_INT] = {
		0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
	};
	ev_uint32_t integer;
	int i;

	evtag_init();

	for (i = 0; i < TEST_MAX_INT; i++) {
		int oldlen, newlen;
		oldlen = (int)EVBUFFER_LENGTH(tmp);
		evtag_encode_tag(tmp, integers[i]);
		newlen = (int)EVBUFFER_LENGTH(tmp);
		TT_BLATHER(("encoded 0x%08x with %d bytes",
		    (unsigned)integers[i], newlen - oldlen));
	}

	for (i = 0; i < TEST_MAX_INT; i++) {
		tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1);
		tt_uint_op(integer, ==, integers[i]);
	}

	tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);

end:
	evbuffer_free(tmp);
}

static void
evtag_test_peek(void *ptr)
{
	struct evbuffer *tmp = evbuffer_new();
	ev_uint32_t u32;

	evtag_marshal_int(tmp, 30, 0);
	evtag_marshal_string(tmp, 40, "Hello world");

	tt_int_op(evtag_peek(tmp, &u32), ==, 1);
	tt_int_op(u32, ==, 30);
	tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
	tt_int_op(u32, ==, 1+1+1);
	tt_int_op(evtag_consume(tmp), ==, 0);

	tt_int_op(evtag_peek(tmp, &u32), ==, 1);
	tt_int_op(u32, ==, 40);
	tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
	tt_int_op(u32, ==, 1+1+11);
	tt_int_op(evtag_payload_length(tmp, &u32), ==, 0);
	tt_int_op(u32, ==, 11);

end:
	evbuffer_free(tmp);
}


static void
test_methods(void *ptr)
{
	const char **methods = event_get_supported_methods();
	struct event_config *cfg = NULL;
	struct event_base *base = NULL;
	const char *backend;
	int n_methods = 0;

	tt_assert(methods);

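	/* Remember the first supported method; after counting the rest
	 * below, we ask the config to avoid it and expect any base built
	 * from that config to pick a different backend (or to fail if it
	 * was the only one). */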
	backend = methods[0];
	while (*methods != NULL) {
		TT_BLATHER(("Support method: %s", *methods));
		++methods;
		++n_methods;
	}

	cfg = event_config_new();
	assert(cfg != NULL);

	tt_int_op(event_config_avoid_method(cfg, backend), ==, 0);
	event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);

	base = event_base_new_with_config(cfg);
	if (n_methods > 1) {
		tt_assert(base);
		tt_str_op(backend, !=, event_base_get_method(base));
	} else {
		tt_assert(base == NULL);
	}

end:
	if (base)
		event_base_free(base);
	if (cfg)
		event_config_free(cfg);
}

static void
test_version(void *arg)
{
	const char *vstr;
	ev_uint32_t vint;
	int major, minor, patch, n;

	vstr = event_get_version();
	vint = event_get_version_number();

	tt_assert(vstr);
	tt_assert(vint);

	tt_str_op(vstr, ==, LIBEVENT_VERSION);
	tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER);

	n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch);
	tt_assert(3 == n);
	tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8)));
end:
	;
}

static void
test_base_features(void *arg)
{
	struct event_base *base = NULL;
	struct event_config *cfg = NULL;

	cfg = event_config_new();

	tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET));

	base = event_base_new_with_config(cfg);
	if (base) {
		tt_int_op(EV_FEATURE_ET, ==,
		    event_base_get_features(base) & EV_FEATURE_ET);
	} else {
		base = event_base_new();
		tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET);
	}

end:
	if (base)
		event_base_free(base);
	if (cfg)
		event_config_free(cfg);
}

#ifdef EVENT__HAVE_SETENV
#define SETENV_OK
#elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV)
static void setenv(const char *k, const char *v, int o_)
{
	char b[256];
	evutil_snprintf(b, sizeof(b), "%s=%s",k,v);
	putenv(b);
}
#define SETENV_OK
#endif

#ifdef EVENT__HAVE_UNSETENV
#define UNSETENV_OK
#elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV)
static void unsetenv(const char *k)
{
	char b[256];
	evutil_snprintf(b, sizeof(b), "%s=",k);
	putenv(b);
}
#define UNSETENV_OK
#endif

#if defined(SETENV_OK) && defined(UNSETENV_OK)
static void
methodname_to_envvar(const char *mname, char *buf, size_t buflen)
{
	char *cp;
	evutil_snprintf(buf, buflen, "EVENT_NO%s", mname);
	for (cp = buf; *cp; ++cp) {
		*cp = EVUTIL_TOUPPER_(*cp);
	}
}
#endif

static void
test_base_environ(void *arg)
{
	struct event_base *base = NULL;
	struct event_config *cfg = NULL;

#if defined(SETENV_OK) && defined(UNSETENV_OK)
	const char **basenames;
	int i, n_methods=0;
	char varbuf[128];
	const char *defaultname, *ignoreenvname;

	/* See if unsetenv works before we rely on it.
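	 * (The putenv()-based replacement above may only reset the variable
	 * to an empty string instead of removing it; if so, skip.)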
	 */
	setenv("EVENT_NOWAFFLES", "1", 1);
	unsetenv("EVENT_NOWAFFLES");
	if (getenv("EVENT_NOWAFFLES") != NULL) {
#ifndef EVENT__HAVE_UNSETENV
		TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test"));
#else
		TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test"));
#endif
		tt_skip();
	}

	basenames = event_get_supported_methods();
	for (i = 0; basenames[i]; ++i) {
		methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf));
		unsetenv(varbuf);
		++n_methods;
	}

	base = event_base_new();
	tt_assert(base);

	defaultname = event_base_get_method(base);
	TT_BLATHER(("default is <%s>", defaultname));
	event_base_free(base);
	base = NULL;

	/* Can we disable the method with EVENT_NOfoo ? */
	if (!strcmp(defaultname, "epoll (with changelist)")) {
		setenv("EVENT_NOEPOLL", "1", 1);
		ignoreenvname = "epoll";
	} else {
		methodname_to_envvar(defaultname, varbuf, sizeof(varbuf));
		setenv(varbuf, "1", 1);
		ignoreenvname = defaultname;
	}

	/* Use an empty cfg rather than NULL so a failure doesn't exit() */
	cfg = event_config_new();
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	cfg = NULL;
	if (n_methods == 1) {
		tt_assert(!base);
	} else {
		tt_assert(base);
		tt_str_op(defaultname, !=, event_base_get_method(base));
		event_base_free(base);
		base = NULL;
	}

	/* Can we disable looking at the environment with IGNORE_ENV ? */
	cfg = event_config_new();
	event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
	base = event_base_new_with_config(cfg);
	tt_assert(base);
	tt_str_op(ignoreenvname, ==, event_base_get_method(base));
#else
	tt_skip();
#endif

end:
	if (base)
		event_base_free(base);
	if (cfg)
		event_config_free(cfg);
}

static void
read_called_once_cb(evutil_socket_t fd, short event, void *arg)
{
	tt_int_op(event, ==, EV_READ);
	called += 1;
end:
	;
}

static void
timeout_called_once_cb(evutil_socket_t fd, short event, void *arg)
{
	tt_int_op(event, ==, EV_TIMEOUT);
	called += 100;
end:
	;
}

static void
immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg)
{
	tt_int_op(event, ==, EV_TIMEOUT);
	called += 1000;
end:
	;
}

static void
test_event_once(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct timeval tv;
	int r;

	tv.tv_sec = 0;
	tv.tv_usec = 50*1000;
	called = 0;
	r = event_base_once(data->base, data->pair[0], EV_READ,
	    read_called_once_cb, NULL, NULL);
	tt_int_op(r, ==, 0);
	r = event_base_once(data->base, -1, EV_TIMEOUT,
	    timeout_called_once_cb, NULL, &tv);
	tt_int_op(r, ==, 0);
	r = event_base_once(data->base, -1, 0, NULL, NULL, NULL);
	tt_int_op(r, <, 0);
	r = event_base_once(data->base, -1, EV_TIMEOUT,
	    immediate_called_twice_cb, NULL, NULL);
	tt_int_op(r, ==, 0);
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	r = event_base_once(data->base, -1, EV_TIMEOUT,
	    immediate_called_twice_cb, NULL, &tv);
	tt_int_op(r, ==, 0);

	if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	shutdown(data->pair[1], EVUTIL_SHUT_WR);

	event_base_dispatch(data->base);

	/* 1 from the read callback, 100 from the timeout callback, and
	 * 1000 from each of the two immediate callbacks. */
	tt_int_op(called, ==, 2101);
end:
	;
}

static void
test_event_once_never(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct timeval tv;

	/* Have one trigger in 10 seconds (don't worry: it never fires,
	 * because we exit the loop long before then). */
	tv.tv_sec = 10;
	tv.tv_usec = 0;
	called = 0;
	event_base_once(data->base, -1, EV_TIMEOUT,
	    timeout_called_once_cb, NULL, &tv);

	/* But shut down the base in 75 msec. */
	tv.tv_sec = 0;
	tv.tv_usec = 75*1000;
	event_base_loopexit(data->base, &tv);

	event_base_dispatch(data->base);

	tt_int_op(called, ==, 0);
end:
	;
}

static void
test_event_pending(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event *r=NULL, *w=NULL, *t=NULL;
	struct timeval tv, now, tv2;

	tv.tv_sec = 0;
	tv.tv_usec = 500 * 1000;
	r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb,
	    NULL);
	w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb,
	    NULL);
	t = evtimer_new(data->base, timeout_cb, NULL);

	tt_assert(r);
	tt_assert(w);
	tt_assert(t);

	evutil_gettimeofday(&now, NULL);
	event_add(r, NULL);
	event_add(t, &tv);

	tt_assert( event_pending(r, EV_READ, NULL));
	tt_assert(!event_pending(w, EV_WRITE, NULL));
	tt_assert(!event_pending(r, EV_WRITE, NULL));
	tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL));
	tt_assert(!event_pending(r, EV_TIMEOUT, NULL));
	tt_assert( event_pending(t, EV_TIMEOUT, NULL));
	tt_assert( event_pending(t, EV_TIMEOUT, &tv2));

	tt_assert(evutil_timercmp(&tv2, &now, >));

	test_timeval_diff_eq(&now, &tv2, 500);

end:
	if (r) {
		event_del(r);
		event_free(r);
	}
	if (w) {
		event_del(w);
		event_free(w);
	}
	if (t) {
		event_del(t);
		event_free(t);
	}
}

static void
dfd_cb(evutil_socket_t fd, short e, void *data)
{
	*(int*)data = (int)e;
}

static void
test_event_closed_fd_poll(void *arg)
{
	struct timeval tv;
	struct event *e;
	struct basic_test_data *data = (struct basic_test_data *)arg;
	int i = 0;

	if (strcmp(event_base_get_method(data->base), "poll")) {
		tinytest_set_test_skipped_();
		return;
	}

	e = event_new(data->base, data->pair[0], EV_READ, dfd_cb, &i);
	tt_assert(e);

	tv.tv_sec = 0;
	tv.tv_usec = 500 * 1000;
	event_add(e, &tv);
	tt_assert(event_pending(e, EV_READ, NULL));
	close(data->pair[0]);
	data->pair[0] = -1;	/** avoids double-close */
	event_base_loop(data->base, EVLOOP_ONCE);
	tt_int_op(i, ==, EV_READ);

end:
	if (e) {
		event_del(e);
		event_free(e);
	}
}

#ifndef _WIN32
/* You can't do this test on windows, since dup2 doesn't work on sockets */

/* Regression test for our workaround for a fun epoll/linux related bug
 * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2)
 * will get you an EEXIST */
static void
test_dup_fd(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	struct event *ev1=NULL, *ev2=NULL;
	int fd, dfd=-1;
	int ev1_got, ev2_got;

	tt_int_op(write(data->pair[0], "Hello world",
	    strlen("Hello world")), >, 0);
	fd = data->pair[1];

	dfd = dup(fd);
	tt_int_op(dfd, >=, 0);

	ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb,
	    &ev1_got);
	ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got);
	ev1_got = ev2_got = 0;
	event_add(ev1, NULL);
	event_add(ev2, NULL);
	event_base_loop(base, EVLOOP_ONCE);
	tt_int_op(ev1_got, ==, EV_READ);
	tt_int_op(ev2_got, ==, EV_READ);

	/* Now close and delete dfd then dispatch.  We need to do the
	 * dispatch here so that when we add it later, we think there
	 * was an intermediate delete. */
	close(dfd);
	event_del(ev2);
	ev1_got = ev2_got = 0;
	event_base_loop(base, EVLOOP_ONCE);
	tt_want_int_op(ev1_got, ==, EV_READ);
	tt_int_op(ev2_got, ==, 0);

	/* Re-duplicate the fd.  We need to get the same duplicated
	 * value that we closed to provoke the epoll quirk.  Also, we
	 * need to change the events to write, or else the old lingering
	 * read event will make the test pass whether the change was
	 * successful or not. */
	tt_int_op(dup2(fd, dfd), ==, dfd);
	event_free(ev2);
	ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got);
	event_add(ev2, NULL);
	ev1_got = ev2_got = 0;
	event_base_loop(base, EVLOOP_ONCE);
	tt_want_int_op(ev1_got, ==, EV_READ);
	tt_int_op(ev2_got, ==, EV_WRITE);

end:
	if (ev1)
		event_free(ev1);
	if (ev2)
		event_free(ev2);
	if (dfd >= 0)
		close(dfd);
}
#endif

#ifdef EVENT__DISABLE_MM_REPLACEMENT
static void
test_mm_functions(void *arg)
{
	tinytest_set_test_skipped_();
}
#else
static int
check_dummy_mem_ok(void *mem_)
{
	char *mem = mem_;
	mem -= 16;
	return !memcmp(mem, "{[<guardedram>]}", 16);
}

static void *
dummy_malloc(size_t len)
{
	char *mem = malloc(len+16);
	memcpy(mem, "{[<guardedram>]}", 16);
	return mem+16;
}

static void *
dummy_realloc(void *mem_, size_t len)
{
	char *mem = mem_;
	if (!mem)
		return dummy_malloc(len);
	tt_want(check_dummy_mem_ok(mem_));
	mem -= 16;
	mem = realloc(mem, len+16);
	return mem+16;
}

static void
dummy_free(void *mem_)
{
	char *mem = mem_;
	tt_want(check_dummy_mem_ok(mem_));
	mem -= 16;
	free(mem);
}

static void
test_mm_functions(void *arg)
{
	struct event_base *b = NULL;
	struct event_config *cfg = NULL;
	event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free);
	cfg = event_config_new();
	event_config_avoid_method(cfg, "Nonesuch");
	b = event_base_new_with_config(cfg);
	tt_assert(b);
	tt_assert(check_dummy_mem_ok(b));
end:
	if (cfg)
		event_config_free(cfg);
	if (b)
		event_base_free(b);
}
#endif

static void
many_event_cb(evutil_socket_t fd, short event, void *arg)
{
	int *calledp = arg;
	*calledp += 1;
}

static void
test_many_events(void *arg)
{
	/* Try 70 events that should all be ready at once.  This will
	 * exercise the "resize" code on most of the backends, and will make
	 * sure that we can get past the 64-handle limit of some windows
	 * functions.
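	 * (WaitForMultipleObjects, for instance, accepts at most 64 handles
	 * per call.)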
	 */
#define MANY 70

	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	int one_at_a_time = data->setup_data != NULL;
	evutil_socket_t sock[MANY];
	struct event *ev[MANY];
	int xcalled[MANY];
	int i;
	int loopflags = EVLOOP_NONBLOCK, evflags=0;
	if (one_at_a_time) {
		loopflags |= EVLOOP_ONCE;
		evflags = EV_PERSIST;
	}

	memset(sock, 0xff, sizeof(sock));
	memset(ev, 0, sizeof(ev));
	memset(xcalled, 0, sizeof(xcalled));

	for (i = 0; i < MANY; ++i) {
		/* We need an event that will hit the backend, and that will
		 * be ready immediately.  "Send a datagram" is an easy
		 * instance of that. */
		sock[i] = socket(AF_INET, SOCK_DGRAM, 0);
		tt_assert(sock[i] >= 0);
		xcalled[i] = 0;
		ev[i] = event_new(base, sock[i], EV_WRITE|evflags,
		    many_event_cb, &xcalled[i]);
		event_add(ev[i], NULL);
		if (one_at_a_time)
			event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE);
	}

	event_base_loop(base, loopflags);

	for (i = 0; i < MANY; ++i) {
		if (one_at_a_time)
			tt_int_op(xcalled[i], ==, MANY - i + 1);
		else
			tt_int_op(xcalled[i], ==, 1);
	}

end:
	for (i = 0; i < MANY; ++i) {
		if (ev[i])
			event_free(ev[i]);
		if (sock[i] >= 0)
			evutil_closesocket(sock[i]);
	}
#undef MANY
}

static void
test_struct_event_size(void *arg)
{
	tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event));
end:
	;
}

static void
test_get_assignment(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	struct event *ev1 = NULL;
	const char *str = "foo";

	struct event_base *b;
	evutil_socket_t s;
	short what;
	event_callback_fn cb;
	void *cb_arg;

	ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb,
	    __UNCONST(str));
	event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg);

	tt_ptr_op(b, ==, base);
	tt_int_op(s, ==, data->pair[1]);
	tt_int_op(what, ==, EV_READ);
	tt_ptr_op(cb, ==, dummy_read_cb);
	tt_ptr_op(cb_arg, ==, str);

	/* Now make sure this doesn't crash.
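	 * (event_get_assignment permits any of its output pointers to be
	 * NULL.)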
	 */
	event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL);

end:
	if (ev1)
		event_free(ev1);
}

struct foreach_helper {
	int count;
	const struct event *ev;
};

static int
foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg)
{
	struct foreach_helper *h = event_get_callback_arg(ev);
	struct timeval *tv = arg;
	if (event_get_callback(ev) != timeout_cb)
		return 0;
	tt_ptr_op(event_get_base(ev), ==, base);
	tt_int_op(tv->tv_sec, ==, 10);
	h->ev = ev;
	h->count++;
	return 0;
end:
	return -1;
}

static int
foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg)
{
	const struct event **ev_out = arg;
	struct foreach_helper *h = event_get_callback_arg(ev);
	if (event_get_callback(ev) != timeout_cb)
		return 0;
	if (h->count == 99) {
		*ev_out = ev;
		return 101;
	}
	return 0;
}

static void
test_event_foreach(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	struct event *ev[5];
	struct foreach_helper visited[5];
	int i;
	struct timeval ten_sec = {10,0};
	const struct event *ev_found = NULL;

	for (i = 0; i < 5; ++i) {
		visited[i].count = 0;
		visited[i].ev = NULL;
		ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]);
	}

	tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL));
	tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL));

	event_add(ev[0], &ten_sec);
	event_add(ev[1], &ten_sec);
	event_active(ev[1], EV_TIMEOUT, 1);
	event_active(ev[2], EV_TIMEOUT, 1);
	event_add(ev[3], &ten_sec);
	/* Don't touch ev[4].
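	 * It is neither added nor active, so the foreach calls below should
	 * not visit it.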
	 */

	tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb,
	    &ten_sec));
	tt_int_op(1, ==, visited[0].count);
	tt_int_op(1, ==, visited[1].count);
	tt_int_op(1, ==, visited[2].count);
	tt_int_op(1, ==, visited[3].count);
	tt_ptr_op(ev[0], ==, visited[0].ev);
	tt_ptr_op(ev[1], ==, visited[1].ev);
	tt_ptr_op(ev[2], ==, visited[2].ev);
	tt_ptr_op(ev[3], ==, visited[3].ev);

	visited[2].count = 99;
	tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb,
	    &ev_found));
	tt_ptr_op(ev_found, ==, ev[2]);

end:
	for (i=0; i<5; ++i) {
		event_free(ev[i]);
	}
}

static struct event_base *cached_time_base = NULL;
static int cached_time_reset = 0;
static int cached_time_sleep = 0;
static void
cache_time_cb(evutil_socket_t fd, short what, void *arg)
{
	struct timeval *tv = arg;
	tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv));
	if (cached_time_sleep) {
		struct timeval delay = { 0, 30*1000 };
		evutil_usleep_(&delay);
	}
	if (cached_time_reset) {
		event_base_update_cache_time(cached_time_base);
	}
end:
	;
}

static void
test_gettimeofday_cached(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_config *cfg = NULL;
	struct event_base *base = NULL;
	struct timeval tv1, tv2, tv3, now;
	struct event *ev1=NULL, *ev2=NULL, *ev3=NULL;
	int cached_time_disable = strstr(data->setup_data, "disable") != NULL;

	cfg = event_config_new();
	if (cached_time_disable) {
		event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);
	}
	cached_time_base = base = event_base_new_with_config(cfg);
	tt_assert(base);

	/* Try gettimeofday_cached outside of an event loop. */
	evutil_gettimeofday(&now, NULL);
	tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1));
	tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2));
	tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10);
	tt_int_op(timeval_msec_diff(&tv1, &now), <, 10);

	cached_time_reset = strstr(data->setup_data, "reset") != NULL;
	cached_time_sleep = strstr(data->setup_data, "sleep") != NULL;

	ev1 = event_new(base, -1, 0, cache_time_cb, &tv1);
	ev2 = event_new(base, -1, 0, cache_time_cb, &tv2);
	ev3 = event_new(base, -1, 0, cache_time_cb, &tv3);

	event_active(ev1, EV_TIMEOUT, 1);
	event_active(ev2, EV_TIMEOUT, 1);
	event_active(ev3, EV_TIMEOUT, 1);

	event_base_dispatch(base);

	if (cached_time_reset && cached_time_sleep) {
		tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
		tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
	} else if (cached_time_disable && cached_time_sleep) {
		tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
		tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
	} else if (!cached_time_disable) {
		tt_assert(evutil_timercmp(&tv1, &tv2, ==));
		tt_assert(evutil_timercmp(&tv2, &tv3, ==));
	}

end:
	if (ev1)
		event_free(ev1);
	if (ev2)
		event_free(ev2);
	if (ev3)
		event_free(ev3);
	if (base)
		event_base_free(base);
	if (cfg)
		event_config_free(cfg);
}

static void
tabf_cb(evutil_socket_t fd, short what, void *arg)
{
	int *ptr = arg;
	*ptr = what;
	*ptr += 0x10000;
}

static void
test_active_by_fd(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;
	struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL;
	int e1,e2,e3,e4;
#ifndef _WIN32
	struct event *evsig = NULL;
	int es;
#endif
	struct timeval tenmin = { 600, 0 };

	/* Ensure no crash on nonexistent FD. */
	event_base_active_by_fd(base, 1000, EV_READ);

	/* Ensure no crash on bogus FD. */
	event_base_active_by_fd(base, -1, EV_READ);

	/* Ensure no crash on nonexistent/bogus signal. */
	event_base_active_by_signal(base, 1000);
	event_base_active_by_signal(base, -1);

	event_base_assert_ok_(base);

	e1 = e2 = e3 = e4 = 0;
	ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1);
	ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2);
	ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3);
	ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4);
	tt_assert(ev1);
	tt_assert(ev2);
	tt_assert(ev3);
	tt_assert(ev4);
#ifndef _WIN32
	evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es);
	tt_assert(evsig);
	event_add(evsig, &tenmin);
#endif

	event_add(ev1, &tenmin);
	event_add(ev2, NULL);
	event_add(ev3, NULL);
	event_add(ev4, &tenmin);


	event_base_assert_ok_(base);

	/* Trigger 2, 3, 4 */
	event_base_active_by_fd(base, data->pair[0], EV_WRITE);
	event_base_active_by_fd(base, data->pair[1], EV_READ);
#ifndef _WIN32
	event_base_active_by_signal(base, SIGHUP);
#endif

	event_base_assert_ok_(base);

	event_base_loop(base, EVLOOP_ONCE);

	tt_int_op(e1, ==, 0);
	tt_int_op(e2, ==, EV_WRITE | 0x10000);
	tt_int_op(e3, ==, EV_READ | 0x10000);
	/* Mask out EV_WRITE here, since it could be genuinely writeable.
	 */
	tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | 0x10000);
#ifndef _WIN32
	tt_int_op(es, ==, EV_SIGNAL | 0x10000);
#endif

end:
	if (ev1)
		event_free(ev1);
	if (ev2)
		event_free(ev2);
	if (ev3)
		event_free(ev3);
	if (ev4)
		event_free(ev4);
#ifndef _WIN32
	if (evsig)
		event_free(evsig);
#endif
}

struct testcase_t main_testcases[] = {
	/* Some converted-over tests */
	{ "methods", test_methods, TT_FORK, NULL, NULL },
	{ "version", test_version, 0, NULL, NULL },
	BASIC(base_features, TT_FORK|TT_NO_LOGS),
	{ "base_environ", test_base_environ, TT_FORK, NULL, NULL },

	BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR),
	BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR),

	BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE),
	BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE),
	BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE),
	BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE),
	BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE),

	BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
	BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
	BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
	BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),

	/* These are still using the old API */
	LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE),
	{ "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
	{ "persistent_active_timeout", test_persistent_active_timeout,
	  TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
	LEGACY(priorities, TT_FORK|TT_NEED_BASE),
	BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE),
	{ "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE,
	  &basic_setup, NULL },

	/* These legacy tests may not all need all of these flags.
	 */
	LEGACY(simpleread, TT_ISOLATED),
	LEGACY(simpleread_multiple, TT_ISOLATED),
	LEGACY(simplewrite, TT_ISOLATED),
	{ "simpleclose", test_simpleclose, TT_FORK, &basic_setup,
	  NULL },
	LEGACY(multiple, TT_ISOLATED),
	LEGACY(persistent, TT_ISOLATED),
	LEGACY(combined, TT_ISOLATED),
	LEGACY(simpletimeout, TT_ISOLATED),
	LEGACY(loopbreak, TT_ISOLATED),
	LEGACY(loopexit, TT_ISOLATED),
	LEGACY(loopexit_multiple, TT_ISOLATED),
	LEGACY(nonpersist_readd, TT_ISOLATED),
	LEGACY(multiple_events_for_same_fd, TT_ISOLATED),
	LEGACY(want_only_once, TT_ISOLATED),
	{ "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL },
	{ "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL },
	{ "event_pending", test_event_pending, TT_ISOLATED, &basic_setup,
	  NULL },
	{ "event_closed_fd_poll", test_event_closed_fd_poll, TT_ISOLATED, &basic_setup,
	  NULL },

#ifndef _WIN32
	{ "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL },
#endif
	{ "mm_functions", test_mm_functions, TT_FORK, NULL, NULL },
	{ "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL },
	{ "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 },

	{ "struct_event_size", test_struct_event_size, 0, NULL, NULL },
	BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),

	BASIC(event_foreach, TT_FORK|TT_NEED_BASE),
	{ "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("") },
	{ "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("sleep") },
	{ "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("sleep reset") },
	{ "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("sleep disable") },
	{ "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, __UNCONST("disable") },

	BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),

#ifndef _WIN32
	LEGACY(fork, TT_ISOLATED),
#endif
#ifdef EVENT__HAVE_PTHREADS
	/** TODO: support win32 */
	LEGACY(del_wait, TT_ISOLATED|TT_NEED_THREADS),
#endif

	END_OF_TESTCASES
};

struct testcase_t evtag_testcases[] = {
	{ "int", evtag_int_test, TT_FORK, NULL, NULL },
	{ "fuzz", evtag_fuzz, TT_FORK, NULL, NULL },
	{ "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL },
	{ "peek", evtag_test_peek, 0, NULL, NULL },

	END_OF_TESTCASES
};

struct testcase_t signal_testcases[] = {
#ifndef _WIN32
	LEGACY(simplestsignal, TT_ISOLATED),
	LEGACY(simplesignal, TT_ISOLATED),
	LEGACY(multiplesignal, TT_ISOLATED),
	LEGACY(immediatesignal, TT_ISOLATED),
	LEGACY(signal_dealloc, TT_ISOLATED),
	LEGACY(signal_pipeloss, TT_ISOLATED),
	LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS),
	LEGACY(signal_restore, TT_ISOLATED),
	LEGACY(signal_assert, TT_ISOLATED),
	LEGACY(signal_while_processing, TT_ISOLATED),
#endif
	END_OF_TESTCASES
};