/*	$NetBSD: regress.c,v 1.7 2020/05/25 20:47:34 christos Exp $	*/

/*
 * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "util-internal.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#endif

#include "event2/event-config.h"

#include <sys/types.h>
#include <sys/stat.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifndef _WIN32
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>
#include <netdb.h>
#endif
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <ctype.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event2/tag.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/util.h"
#include "event-internal.h"
#include "evthread-internal.h"
#include "log-internal.h"
#include "time-internal.h"

#include "regress.h"

#ifndef _WIN32
#include "regress.gen.h"
#endif

evutil_socket_t pair[2];
int test_ok;
int called;
struct event_base *global_base;

static char wbuf[4096];
static char rbuf[4096];
static int woff;
static int roff;
static int usepersist;
static struct timeval tset;
static struct timeval tcalled;


#define TEST1 "this is a test"

#ifndef SHUT_WR
#define SHUT_WR 1
#endif

#ifdef _WIN32
#define write(fd,buf,len) send((fd),(buf),(int)(len),0)
#define read(fd,buf,len) recv((fd),(buf),(int)(len),0)
#endif

struct basic_cb_args
{
	struct event_base *eb;
	struct event *ev;
	unsigned int callcount;
};

static void
simple_read_cb(evutil_socket_t fd, short event, void *arg)
{
	char buf[256];
	int len;

	len = read(fd, buf, sizeof(buf));

	if (len) {
		if (!called) {
			if (event_add(arg, NULL) == -1)
				exit(1);
		}
	} else if (called == 1)
		test_ok = 1;

	called++;
}

static void
basic_read_cb(evutil_socket_t fd, short event, void *data)
{
	char buf[256];
	int len;
	struct basic_cb_args *arg = data;

	len = read(fd, buf, sizeof(buf));

	if (len < 0) {
		tt_fail_perror("read (callback)");
	} else {
		switch (arg->callcount++) {
		case 0:	 /* first call: expect to read data; cycle */
			if (len > 0)
				return;

			tt_fail_msg("EOF before data read");
			break;

		case 1:	 /* second call: expect EOF; stop */
			if (len > 0)
				tt_fail_msg("not all data read on first cycle");
			break;

		default:  /* third call: should not happen */
			tt_fail_msg("too many cycles");
		}
	}

	event_del(arg->ev);
	event_base_loopexit(arg->eb, NULL);
}

static void
dummy_read_cb(evutil_socket_t fd, short event, void *arg)
{
}

static void
simple_write_cb(evutil_socket_t fd, short event, void *arg)
{
	int len;

	len = write(fd, TEST1, strlen(TEST1) + 1);
	if (len == -1)
		test_ok = 0;
	else
		test_ok = 1;
}

static void
multiple_write_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;
	int len;

	len = 128;
	if (woff + len >= (int)sizeof(wbuf))
		len = sizeof(wbuf) - woff;

	len = write(fd, wbuf + woff, len);
	if (len == -1) {
		fprintf(stderr, "%s: write\n", __func__);
		if (usepersist)
			event_del(ev);
		return;
	}

	woff += len;

	if (woff >= (int)sizeof(wbuf)) {
		shutdown(fd, SHUT_WR);
		if (usepersist)
			event_del(ev);
		return;
	}

	if (!usepersist) {
		if (event_add(ev, NULL) == -1)
			exit(1);
	}
}

static void
multiple_read_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;
	int len;

	len = read(fd, rbuf + roff, sizeof(rbuf) - roff);
	if (len == -1)
		fprintf(stderr, "%s: read\n", __func__);
	if (len <= 0) {
		if (usepersist)
			event_del(ev);
		return;
	}

	roff += len;
	if (!usepersist) {
		if (event_add(ev, NULL) == -1)
			exit(1);
	}
}

static void
timeout_cb(evutil_socket_t fd, short event, void *arg)
{
	evutil_gettimeofday(&tcalled, NULL);
}

struct both {
	struct event ev;
	int nread;
};

static void
combined_read_cb(evutil_socket_t fd, short event, void *arg)
{
	struct both *both = arg;
	char buf[128];
	int len;

	len = read(fd, buf, sizeof(buf));
	if (len == -1)
		fprintf(stderr, "%s: read\n", __func__);
	if (len <= 0)
		return;

	both->nread += len;
	if (event_add(&both->ev, NULL) == -1)
		exit(1);
}

static void
combined_write_cb(evutil_socket_t fd, short event, void *arg)
{
	struct both *both = arg;
	char buf[128];
	int len;

	len = sizeof(buf);
	if (len > both->nread)
		len = both->nread;

	memset(buf, 'q', len);

	len = write(fd, buf, len);
	if (len == -1)
		fprintf(stderr, "%s: write\n", __func__);
	if (len <= 0) {
		shutdown(fd, SHUT_WR);
		return;
	}

	both->nread -= len;
	if (event_add(&both->ev, NULL) == -1)
		exit(1);
}

/* These macros used to replicate the work of the legacy test wrapper code */
#define setup_test(x) do {						\
	if (!in_legacy_test_wrapper) {					\
		TT_FAIL(("Legacy test %s not wrapped properly", x));	\
		return;							\
	}								\
	} while (0)
#define cleanup_test() setup_test("cleanup")

static void
test_simpleread(void)
{
	struct event ev;

	/* Very simple read test */
	setup_test("Simple read: ");

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	shutdown(pair[0], SHUT_WR);

	event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_dispatch();

	cleanup_test();
}

static void
test_simplewrite(void)
{
	struct event ev;

	/* Very simple write test */
	setup_test("Simple write: ");

	event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_dispatch();

	cleanup_test();
}

static void
simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg)
{
	if (++called == 2)
		test_ok = 1;
}

static void
test_simpleread_multiple(void)
{
	struct event one, two;

	/* Very simple read test */
	setup_test("Simple read to multiple events: ");

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	shutdown(pair[0], SHUT_WR);

	event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL);
	if (event_add(&one, NULL) == -1)
		exit(1);
	event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL);
	if (event_add(&two, NULL) == -1)
		exit(1);
	event_dispatch();

	cleanup_test();
}

static int have_closed = 0;
static int premature_event = 0;
static void
simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr)
{
	evutil_socket_t **fds = ptr;
	TT_BLATHER(("Closing"));
	evutil_closesocket(*fds[0]);
	evutil_closesocket(*fds[1]);
	*fds[0] = -1;
	*fds[1] = -1;
	have_closed = 1;
}

static void
record_event_cb(evutil_socket_t s, short what, void *ptr)
{
	short *whatp = ptr;
	if (!have_closed)
		premature_event = 1;
	*whatp = what;
	TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s));
}

static void
test_simpleclose(void *ptr)
{
	/* Test that a close of FD is detected as a read and as a write. */
	struct event_base *base = event_base_new();
	evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1};
	evutil_socket_t *to_close[2];
	struct event *rev=NULL, *wev=NULL, *closeev=NULL;
	struct timeval tv;
	short got_read_on_close = 0, got_write_on_close = 0;
	char buf[1024];
	memset(buf, 99, sizeof(buf));
#ifdef _WIN32
#define LOCAL_SOCKETPAIR_AF AF_INET
#else
#define LOCAL_SOCKETPAIR_AF AF_UNIX
#endif
	if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0)
		TT_DIE(("socketpair: %s", strerror(errno)));
	if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0)
		TT_DIE(("socketpair: %s", strerror(errno)));
	if (evutil_make_socket_nonblocking(pair1[1]) < 0)
		TT_DIE(("make_socket_nonblocking"));
	if (evutil_make_socket_nonblocking(pair2[1]) < 0)
		TT_DIE(("make_socket_nonblocking"));

	/** Stuff pair2[1] full of data, until write fails */
	while (1) {
		int r = write(pair2[1], buf, sizeof(buf));
		if (r<0) {
			int err = evutil_socket_geterror(pair2[1]);
			if (! EVUTIL_ERR_RW_RETRIABLE(err))
				TT_DIE(("write failed strangely: %s",
					evutil_socket_error_to_string(err)));
			break;
		}
	}
	to_close[0] = &pair1[0];
	to_close[1] = &pair2[0];

	closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb,
	    to_close);
	rev = event_new(base, pair1[1], EV_READ, record_event_cb,
	    &got_read_on_close);
	TT_BLATHER(("Waiting for read on %d", (int)pair1[1]));
	wev = event_new(base, pair2[1], EV_WRITE, record_event_cb,
	    &got_write_on_close);
	TT_BLATHER(("Waiting for write on %d", (int)pair2[1]));
	tv.tv_sec = 0;
	tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make
				* sure we get a read event. */
	event_add(closeev, &tv);
	event_add(rev, NULL);
	event_add(wev, NULL);
	/* Don't let the test go on too long. */
	tv.tv_sec = 0;
	tv.tv_usec = 200*1000;
	event_base_loopexit(base, &tv);
	event_base_loop(base, 0);

	tt_int_op(got_read_on_close, ==, EV_READ);
	tt_int_op(got_write_on_close, ==, EV_WRITE);
	tt_int_op(premature_event, ==, 0);

end:
	if (pair1[0] >= 0)
		evutil_closesocket(pair1[0]);
	if (pair1[1] >= 0)
		evutil_closesocket(pair1[1]);
	if (pair2[0] >= 0)
		evutil_closesocket(pair2[0]);
	if (pair2[1] >= 0)
		evutil_closesocket(pair2[1]);
	if (rev)
		event_free(rev);
	if (wev)
		event_free(wev);
	if (closeev)
		event_free(closeev);
	if (base)
		event_base_free(base);
}


static void
test_multiple(void)
{
	struct event ev, ev2;
	int i;

	/* Multiple read and write test */
	setup_test("Multiple read/write: ");
	memset(rbuf, 0, sizeof(rbuf));
	for (i = 0; i < (int)sizeof(wbuf); i++)
		wbuf[i] = i;

	roff = woff = 0;
	usepersist = 0;

	event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2);
	if (event_add(&ev2, NULL) == -1)
		exit(1);
	event_dispatch();

	if (roff == woff)
		test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;

	cleanup_test();
}

static void
test_persistent(void)
{
	struct event ev, ev2;
	int i;

	/* Multiple read and write test with persist */
	setup_test("Persist read/write: ");
	memset(rbuf, 0, sizeof(rbuf));
	for (i = 0; i < (int)sizeof(wbuf); i++)
		wbuf[i] = i;

	roff = woff = 0;
	usepersist = 1;

	event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2);
	if (event_add(&ev2, NULL) == -1)
		exit(1);
	event_dispatch();

	if (roff == woff)
		test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;

	cleanup_test();
}

static void
test_combined(void)
{
	struct both r1, r2, w1, w2;

	setup_test("Combined read/write: ");
	memset(&r1, 0, sizeof(r1));
	memset(&r2, 0, sizeof(r2));
	memset(&w1, 0, sizeof(w1));
	memset(&w2, 0, sizeof(w2));

	w1.nread = 4096;
	w2.nread = 8192;

	event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1);
	event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1);
	event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2);
	event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2);
	tt_assert(event_add(&r1.ev, NULL) != -1);
	tt_assert(!event_add(&w1.ev, NULL));
	tt_assert(!event_add(&r2.ev, NULL));
	tt_assert(!event_add(&w2.ev, NULL));
	event_dispatch();

	if (r1.nread == 8192 && r2.nread == 4096)
		test_ok = 1;

end:
	cleanup_test();
}

static void
test_simpletimeout(void)
{
	struct timeval tv;
	struct event ev;

	setup_test("Simple timeout: ");

	tv.tv_usec = 200*1000;
	tv.tv_sec = 0;
	evutil_timerclear(&tcalled);
	evtimer_set(&ev, timeout_cb, NULL);
	evtimer_add(&ev, &tv);

	evutil_gettimeofday(&tset, NULL);
	event_dispatch();
	test_timeval_diff_eq(&tset, &tcalled, 200);

	test_ok = 1;
end:
	cleanup_test();
}

static void
periodic_timeout_cb(evutil_socket_t fd, short event, void *arg)
{
	int *count = arg;

	(*count)++;
	if (*count == 6) {
		/* call loopexit only once - on slow machines(?), it is
		 * apparently possible for this to get called twice. */
		test_ok = 1;
		event_base_loopexit(global_base, NULL);
	}
}

static void
test_persistent_timeout(void)
{
	struct timeval tv;
	struct event ev;
	int count = 0;

	evutil_timerclear(&tv);
	tv.tv_usec = 10000;

	event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST,
	    periodic_timeout_cb, &count);
	event_add(&ev, &tv);

	event_dispatch();

	event_del(&ev);
}

static void
test_persistent_timeout_jump(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event ev;
	int count = 0;
	struct timeval msec100 = { 0, 100 * 1000 };
	struct timeval msec50 = { 0, 50 * 1000 };
	struct timeval msec300 = { 0, 300 * 1000 };

	event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count);
	event_add(&ev, &msec100);
	/* Wait for a bit */
	evutil_usleep_(&msec300);
	event_base_loopexit(data->base, &msec50);
	event_base_dispatch(data->base);
	tt_int_op(count, ==, 1);

end:
	event_del(&ev);
}

struct persist_active_timeout_called {
	int n;
	short events[16];
	struct timeval tvs[16];
};

static void
activate_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;
	event_active(ev, EV_READ, 1);
}

static void
persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg)
{
	struct persist_active_timeout_called *c = arg;
	if (c->n < 15) {
		c->events[c->n] = event;
		evutil_gettimeofday(&c->tvs[c->n], NULL);
		++c->n;
	}
}

static void
test_persistent_active_timeout(void *ptr)
{
	struct timeval tv, tv2, tv_exit, start;
	struct event ev;
	struct persist_active_timeout_called res;

	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;

	memset(&res, 0, sizeof(res));

	tv.tv_sec = 0;
	tv.tv_usec = 200 * 1000;
	event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST,
	    persist_active_timeout_cb, &res);
	event_add(&ev, &tv);

	tv2.tv_sec = 0;
	tv2.tv_usec = 100 * 1000;
	event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2);

	tv_exit.tv_sec = 0;
	tv_exit.tv_usec = 600 * 1000;
	event_base_loopexit(base, &tv_exit);

	event_base_assert_ok_(base);
	evutil_gettimeofday(&start, NULL);

	event_base_dispatch(base);
	event_base_assert_ok_(base);

	tt_int_op(res.n, ==, 3);
	tt_int_op(res.events[0], ==, EV_READ);
	tt_int_op(res.events[1], ==, EV_TIMEOUT);
	tt_int_op(res.events[2], ==, EV_TIMEOUT);
	test_timeval_diff_eq(&start, &res.tvs[0], 100);
	test_timeval_diff_eq(&start, &res.tvs[1], 300);
	test_timeval_diff_eq(&start, &res.tvs[2], 500);
end:
	event_del(&ev);
}

struct common_timeout_info {
	struct event ev;
	struct timeval called_at;
	int which;
	int count;
};

static void
common_timeout_cb(evutil_socket_t fd, short event, void *arg)
{
	struct common_timeout_info *ti = arg;
	++ti->count;
	evutil_gettimeofday(&ti->called_at, NULL);
	if (ti->count >= 4)
		event_del(&ti->ev);
}

static void
test_common_timeout(void *ptr)
{
	struct basic_test_data *data = ptr;

	struct event_base *base = data->base;
	int i;
	struct common_timeout_info info[100];

	struct timeval start;
	struct timeval tmp_100_ms = { 0, 100*1000 };
	struct timeval tmp_200_ms = { 0, 200*1000 };
	struct timeval tmp_5_sec = { 5, 0 };
	struct timeval tmp_5M_usec = { 0, 5*1000*1000 };

	const struct timeval *ms_100, *ms_200, *sec_5;

	ms_100 = event_base_init_common_timeout(base, &tmp_100_ms);
	ms_200 = event_base_init_common_timeout(base, &tmp_200_ms);
	sec_5 = event_base_init_common_timeout(base, &tmp_5_sec);
	tt_assert(ms_100);
	tt_assert(ms_200);
	tt_assert(sec_5);
	tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms),
	    ==, ms_200);
	tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200);
	tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5);
	tt_int_op(ms_100->tv_sec, ==, 0);
	tt_int_op(ms_200->tv_sec, ==, 0);
	tt_int_op(sec_5->tv_sec, ==, 5);
	tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000);
	tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000);
	tt_int_op(sec_5->tv_usec, ==, 0|0x50200000);

	memset(info, 0, sizeof(info));

	for (i=0; i<100; ++i) {
		info[i].which = i;
		event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST,
		    common_timeout_cb, &info[i]);
		if (i % 2) {
			if ((i%20)==1) {
				/* Glass-box test: Make sure we survive the
				 * transition to non-common timeouts. It's
				 * a little tricky. */
				event_add(&info[i].ev, ms_200);
				event_add(&info[i].ev, &tmp_100_ms);
			} else if ((i%20)==3) {
				/* Check heap-to-common too. */
				event_add(&info[i].ev, &tmp_200_ms);
				event_add(&info[i].ev, ms_100);
			} else if ((i%20)==5) {
				/* Also check common-to-common. */
				event_add(&info[i].ev, ms_200);
				event_add(&info[i].ev, ms_100);
			} else {
				event_add(&info[i].ev, ms_100);
			}
		} else {
			event_add(&info[i].ev, ms_200);
		}
	}

	event_base_assert_ok_(base);
	evutil_gettimeofday(&start, NULL);
	event_base_dispatch(base);

	event_base_assert_ok_(base);

	for (i=0; i<10; ++i) {
		tt_int_op(info[i].count, ==, 4);
		if (i % 2) {
			test_timeval_diff_eq(&start, &info[i].called_at, 400);
		} else {
			test_timeval_diff_eq(&start, &info[i].called_at, 800);
		}
	}

	/* Make sure we can free the base with some events in. */
	for (i=0; i<100; ++i) {
		if (i % 2) {
			event_add(&info[i].ev, ms_100);
		} else {
			event_add(&info[i].ev, ms_200);
		}
	}

end:
	event_base_free(data->base); /* need to do this here before info is
				      * out-of-scope */
	data->base = NULL;
}

#ifndef _WIN32
static void signal_cb(evutil_socket_t fd, short event, void *arg);

#define current_base event_global_current_base_
extern struct event_base *current_base;

static void
child_signal_cb(evutil_socket_t fd, short event, void *arg)
{
	struct timeval tv;
	int *pint = arg;

	*pint = 1;

	tv.tv_usec = 500000;
	tv.tv_sec = 0;
	event_loopexit(&tv);
}

static void
test_fork(void)
{
	int status, got_sigchld = 0;
	struct event ev, sig_ev;
	pid_t pid;

	setup_test("After fork: ");

	tt_assert(current_base);
	evthread_make_base_notifiable(current_base);

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);

	evsignal_set(&sig_ev, SIGCHLD, child_signal_cb, &got_sigchld);
	evsignal_add(&sig_ev, NULL);

	event_base_assert_ok_(current_base);
	TT_BLATHER(("Before fork"));
	if ((pid = regress_fork()) == 0) {
		/* in the child */
		TT_BLATHER(("In child, before reinit"));
		event_base_assert_ok_(current_base);
		if (event_reinit(current_base) == -1) {
			fprintf(stdout, "FAILED (reinit)\n");
			exit(1);
		}
		TT_BLATHER(("After reinit"));
		event_base_assert_ok_(current_base);
		TT_BLATHER(("After assert-ok"));

		evsignal_del(&sig_ev);

		called = 0;

		event_dispatch();

		event_base_free(current_base);

		/* we do not send an EOF; simple_read_cb requires an EOF
		 * to set test_ok.  we just verify that the callback was
		 * called. */
		exit(test_ok != 0 || called != 2 ? -2 : 76);
	}

	/* wait for the child to read the data */
	{
		const struct timeval tv = { 0, 100000 };
		evutil_usleep_(&tv);
	}

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	TT_BLATHER(("Before waitpid"));
	if (waitpid(pid, &status, 0) == -1) {
		fprintf(stdout, "FAILED (fork)\n");
		exit(1);
	}
	TT_BLATHER(("After waitpid"));

	if (WEXITSTATUS(status) != 76) {
		fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status));
		exit(1);
	}

	/* test that the current event loop still works */
	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		fprintf(stderr, "%s: write\n", __func__);
	}

	shutdown(pair[0], SHUT_WR);

	event_dispatch();

	if (!got_sigchld) {
		fprintf(stdout, "FAILED (sigchld)\n");
		exit(1);
	}

	evsignal_del(&sig_ev);

end:
	cleanup_test();
}

static void
signal_cb_sa(int sig)
{
	test_ok = 2;
}

static void
signal_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;

	evsignal_del(ev);
	test_ok = 1;
}

static void
test_simplesignal_impl(int find_reorder)
{
	struct event ev;
	struct itimerval itv;

	evsignal_set(&ev, SIGALRM, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	/* find bugs in which operations are re-ordered */
	if (find_reorder) {
		evsignal_del(&ev);
		evsignal_add(&ev, NULL);
	}

	memset(&itv, 0, sizeof(itv));
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 100000;
	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
		goto skip_simplesignal;

	event_dispatch();
skip_simplesignal:
	if (evsignal_del(&ev) == -1)
		test_ok = 0;

	cleanup_test();
}

static void
test_simplestsignal(void)
{
	setup_test("Simplest one signal: ");
	test_simplesignal_impl(0);
}

static void
test_simplesignal(void)
{
	setup_test("Simple signal: ");
	test_simplesignal_impl(1);
}

static void
test_multiplesignal(void)
{
	struct event ev_one, ev_two;
	struct itimerval itv;

	setup_test("Multiple signal: ");

	evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one);
	evsignal_add(&ev_one, NULL);

	evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two);
	evsignal_add(&ev_two, NULL);

	memset(&itv, 0, sizeof(itv));
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 100000;
	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
		goto skip_simplesignal;

	event_dispatch();

skip_simplesignal:
	if (evsignal_del(&ev_one) == -1)
		test_ok = 0;
	if (evsignal_del(&ev_two) == -1)
		test_ok = 0;

	cleanup_test();
}

static void
test_immediatesignal(void)
{
	struct event ev;

	test_ok = 0;
	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	raise(SIGUSR1);
	event_loop(EVLOOP_NONBLOCK);
	evsignal_del(&ev);
	cleanup_test();
}

static void
test_signal_dealloc(void)
{
	/* make sure that evsignal_event is event_del'ed and pipe closed */
	struct event ev;
	struct event_base *base = event_init();
	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	evsignal_del(&ev);
	event_base_free(base);
	/* If we got here without asserting, we're fine. */
	test_ok = 1;
	cleanup_test();
}

static void
test_signal_pipeloss(void)
{
	/* make sure that the base1 pipe is closed correctly. */
	struct event_base *base1, *base2;
	int pipe1;
	test_ok = 0;
	base1 = event_init();
	pipe1 = base1->sig.ev_signal_pair[0];
	base2 = event_init();
	event_base_free(base2);
	event_base_free(base1);
	if (close(pipe1) != -1 || errno!=EBADF) {
		/* fd must be closed, so second close gives -1, EBADF */
		printf("signal pipe not closed. ");
		test_ok = 0;
	} else {
		test_ok = 1;
	}
	cleanup_test();
}

/*
 * make two bases to catch signals, use both of them.  this only works
 * for event mechanisms that use our signal pipe trick.  kqueue handles
 * signals internally, and all interested kqueues get all the signals.
 */
static void
test_signal_switchbase(void)
{
	struct event ev1, ev2;
	struct event_base *base1, *base2;
	int is_kqueue;
	test_ok = 0;
	base1 = event_init();
	base2 = event_init();
	is_kqueue = !strcmp(event_get_method(),"kqueue");
	evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1);
	evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2);
	if (event_base_set(base1, &ev1) ||
	    event_base_set(base2, &ev2) ||
	    event_add(&ev1, NULL) ||
	    event_add(&ev2, NULL)) {
		fprintf(stderr, "%s: cannot set base, add\n", __func__);
		exit(1);
	}

	tt_ptr_op(event_get_base(&ev1), ==, base1);
	tt_ptr_op(event_get_base(&ev2), ==, base2);

	test_ok = 0;
	/* can handle signal before loop is called */
	raise(SIGUSR1);
	event_base_loop(base2, EVLOOP_NONBLOCK);
	if (is_kqueue) {
		if (!test_ok)
			goto end;
		test_ok = 0;
	}
	event_base_loop(base1, EVLOOP_NONBLOCK);
	if (test_ok && !is_kqueue) {
		test_ok = 0;

		/* set base1 to handle signals */
		event_base_loop(base1, EVLOOP_NONBLOCK);
		raise(SIGUSR1);
		event_base_loop(base1, EVLOOP_NONBLOCK);
		event_base_loop(base2, EVLOOP_NONBLOCK);
	}
end:
	event_base_free(base1);
	event_base_free(base2);
	cleanup_test();
}

/*
 * assert that a signal event removed from the event queue really is
 * removed - with no possibility of its parent handler being fired.
 */
static void
test_signal_assert(void)
{
	struct event ev;
	struct event_base *base = event_init();
	test_ok = 0;
	/* use SIGCONT so we don't kill ourselves when we signal to nowhere */
	evsignal_set(&ev, SIGCONT, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	/*
	 * if evsignal_del() fails to reset the handler, its current handler
	 * will still point to evsig_handler().
	 */
	evsignal_del(&ev);

	raise(SIGCONT);
#if 0
	/* only way to verify we were in evsig_handler() */
	/* XXXX Now there's no longer a good way. */
	if (base->sig.evsig_caught)
		test_ok = 0;
	else
		test_ok = 1;
#else
	test_ok = 1;
#endif

	event_base_free(base);
	cleanup_test();
	return;
}

/*
 * assert that we restore our previous signal handler properly.
 */
static void
test_signal_restore(void)
{
	struct event ev;
	struct event_base *base = event_init();
#ifdef EVENT__HAVE_SIGACTION
	struct sigaction sa;
#endif

	test_ok = 0;
#ifdef EVENT__HAVE_SIGACTION
	sa.sa_handler = signal_cb_sa;
	sa.sa_flags = 0x0;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1)
		goto out;
#else
	if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR)
		goto out;
#endif
	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	evsignal_del(&ev);

	raise(SIGUSR1);
	/* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */
	if (test_ok != 2)
		test_ok = 0;
out:
	event_base_free(base);
	cleanup_test();
	return;
}

static void
signal_cb_swp(int sig, short event, void *arg)
{
	called++;
	if (called < 5)
		raise(sig);
	else
		event_loopexit(NULL);
}
static void
timeout_cb_swp(evutil_socket_t fd, short event, void *arg)
{
	if (called == -1) {
		struct timeval tv = {5, 0};

		called = 0;
		evtimer_add((struct event *)arg, &tv);
		raise(SIGUSR1);
		return;
	}
	test_ok = 0;
	event_loopexit(NULL);
}

static void
test_signal_while_processing(void)
{
	struct event_base *base = event_init();
	struct event ev, ev_timer;
	struct timeval tv = {0, 0};

	setup_test("Receiving a signal while processing other signal: ");

	called = -1;
	test_ok = 1;
	signal_set(&ev, SIGUSR1, signal_cb_swp, NULL);
	signal_add(&ev, NULL);
	evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer);
	evtimer_add(&ev_timer, &tv);
	event_dispatch();

	event_base_free(base);
	cleanup_test();
	return;
}
#endif

static void
test_free_active_base(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base1;
	struct event ev1;

	base1 = event_init();
	if (base1) {
		event_assign(&ev1, base1, data->pair[1], EV_READ,
		    dummy_read_cb, NULL);
		event_add(&ev1, NULL);
		event_base_free(base1);	 /* should not crash */
	} else {
		tt_fail_msg("failed to create event_base for test");
	}

	base1 = event_init();
	tt_assert(base1);
	event_assign(&ev1, base1, 0, 0, dummy_read_cb, NULL);
	event_active(&ev1, EV_READ, 1);
	event_base_free(base1);
end:
	;
}

static void
test_manipulate_active_events(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev1;

	event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL);

	/* Make sure an active event is pending. */
	event_active(&ev1, EV_READ, 1);
	tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
	    ==, EV_READ);

	/* Make sure that activating an event twice works. */
	event_active(&ev1, EV_WRITE, 1);
	tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
	    ==, EV_READ|EV_WRITE);

end:
	event_del(&ev1);
}

static void
event_selfarg_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;
	struct event_base *base = event_get_base(ev);
	event_base_assert_ok_(base);
	event_base_loopexit(base, NULL);
	tt_want(ev == event_base_get_running_event(base));
}

static void
test_event_new_selfarg(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb,
	    event_self_cbarg());

	event_active(ev, EV_READ, 1);
	event_base_dispatch(base);

	event_free(ev);
}

static void
test_event_assign_selfarg(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev;

	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
	    event_self_cbarg());
	event_active(&ev, EV_READ, 1);
	event_base_dispatch(base);
}

static void
test_event_base_get_num_events(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev;
	int event_count_active;
	int event_count_virtual;
	int event_count_added;
	int event_count_active_virtual;
	int event_count_active_added;
	int event_count_virtual_added;
	int event_count_active_added_virtual;

	struct timeval qsec = {0, 100000};

	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
	    event_self_cbarg());

	event_add(&ev, &qsec);
	event_count_active = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE);
	event_count_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL);
	event_count_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED);
	event_count_active_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
	event_count_active_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
	event_count_virtual_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
	event_count_active_added_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|
	    EVENT_BASE_COUNT_ADDED|
	    EVENT_BASE_COUNT_VIRTUAL);
	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 0);
	/* libevent itself adds a timeout event, so the event_count is 2 here */
	tt_int_op(event_count_added, ==, 2);
	tt_int_op(event_count_active_virtual, ==, 0);
	tt_int_op(event_count_active_added, ==, 2);
	tt_int_op(event_count_virtual_added, ==, 2);
	tt_int_op(event_count_active_added_virtual, ==, 2);

	event_active(&ev, EV_READ, 1);
	event_count_active = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE);
	event_count_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL);
	event_count_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED);
	event_count_active_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
	event_count_active_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
	event_count_virtual_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
	event_count_active_added_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|
	    EVENT_BASE_COUNT_ADDED|
	    EVENT_BASE_COUNT_VIRTUAL);
	tt_int_op(event_count_active, ==, 1);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 3);
	tt_int_op(event_count_active_virtual, ==, 1);
	tt_int_op(event_count_active_added, ==, 4);
	tt_int_op(event_count_virtual_added, ==, 3);
	tt_int_op(event_count_active_added_virtual, ==, 4);

	event_base_loop(base, 0);
	event_count_active = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE);
	event_count_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL);
	event_count_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED);
	event_count_active_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
	event_count_active_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
	event_count_virtual_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
	event_count_active_added_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|
	    EVENT_BASE_COUNT_ADDED|
	    EVENT_BASE_COUNT_VIRTUAL);
	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 0);
	tt_int_op(event_count_active_virtual, ==, 0);
	tt_int_op(event_count_active_added, ==, 0);
	tt_int_op(event_count_virtual_added, ==, 0);
	tt_int_op(event_count_active_added_virtual, ==, 0);

	event_base_add_virtual_(base);
	event_count_active = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE);
	event_count_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL);
	event_count_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED);
	event_count_active_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
	event_count_active_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
	event_count_virtual_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
	event_count_active_added_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|
	    EVENT_BASE_COUNT_ADDED|
	    EVENT_BASE_COUNT_VIRTUAL);
	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 1);
	tt_int_op(event_count_added, ==, 0);
	tt_int_op(event_count_active_virtual, ==, 1);
	tt_int_op(event_count_active_added, ==, 0);
	tt_int_op(event_count_virtual_added, ==, 1);
	tt_int_op(event_count_active_added_virtual, ==, 1);

end:
	;
}

static void
test_event_base_get_max_events(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev;
	struct event ev2;
	int event_count_active;
	int event_count_virtual;
	int event_count_added;
	int event_count_active_virtual;
	int event_count_active_added;
	int event_count_virtual_added;
	int event_count_active_added_virtual;

	struct timeval qsec = {0, 100000};

	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
	    event_self_cbarg());
	event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb,
	    event_self_cbarg());

	event_add(&ev, &qsec);
	event_add(&ev2, &qsec);
	event_del(&ev2);

	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 0);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_active_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
	event_count_virtual_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_added_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE |
	    EVENT_BASE_COUNT_ADDED |
	    EVENT_BASE_COUNT_VIRTUAL, 0);

	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 0);
	/* libevent itself adds a timeout event, so the event_count is 4 here */
	tt_int_op(event_count_added, ==, 4);
	tt_int_op(event_count_active_virtual, ==, 0);
	tt_int_op(event_count_active_added, ==, 4);
	tt_int_op(event_count_virtual_added, ==, 4);
	tt_int_op(event_count_active_added_virtual, ==, 4);

	event_active(&ev, EV_READ, 1);
	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 0);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_active_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
	event_count_virtual_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_added_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE |
	    EVENT_BASE_COUNT_ADDED |
	    EVENT_BASE_COUNT_VIRTUAL, 0);

	tt_int_op(event_count_active, ==, 1);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 4);
	tt_int_op(event_count_active_virtual, ==, 1);
	tt_int_op(event_count_active_added, ==, 5);
	tt_int_op(event_count_virtual_added, ==, 4);
	tt_int_op(event_count_active_added_virtual, ==, 5);

	event_base_loop(base, 0);
	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 1);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 1);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 1);
	event_count_active_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_active_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
	event_count_virtual_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_added_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE |
	    EVENT_BASE_COUNT_ADDED |
	    EVENT_BASE_COUNT_VIRTUAL, 1);

	tt_int_op(event_count_active, ==, 1);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 4);
	tt_int_op(event_count_active_virtual, ==, 0);
	tt_int_op(event_count_active_added, ==, 0);
	tt_int_op(event_count_virtual_added, ==, 0);
	tt_int_op(event_count_active_added_virtual, ==, 0);

	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 0);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 0);
	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 0);

	event_base_add_virtual_(base);
	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 0);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_active_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
	event_count_virtual_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_added_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE |
	    EVENT_BASE_COUNT_ADDED |
	    EVENT_BASE_COUNT_VIRTUAL, 0);

	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 1);
	tt_int_op(event_count_added, ==, 0);
	tt_int_op(event_count_active_virtual, ==, 1);
	tt_int_op(event_count_active_added, ==, 0);
	tt_int_op(event_count_virtual_added, ==, 1);
	tt_int_op(event_count_active_added_virtual, ==, 1);

end:
	;
}

static void
test_bad_assign(void *ptr)
{
	struct event ev;
	int r;
	/* READ|SIGNAL is not allowed */
	r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL);
	tt_int_op(r,==,-1);

end:
	;
}

static int reentrant_cb_run = 0;

static void
bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr)
{
	struct event_base *base = ptr;
	int r;
	reentrant_cb_run = 1;
	/* This reentrant call to event_base_loop should be detected and
	 * should fail */
	r = event_base_loop(base, 0);
	tt_int_op(r, ==, -1);
end:
	;
}

static void
test_bad_reentrant(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev;
	int r;
	event_assign(&ev, base, -1,
	    0, bad_reentrant_run_loop_cb, base);

	event_active(&ev, EV_WRITE, 1);
	r = event_base_loop(base, 0);
	tt_int_op(r, ==, 1);
	tt_int_op(reentrant_cb_run, ==, 1);
end:
	;
}

static int n_write_a_byte_cb=0;
static int n_read_and_drain_cb=0;
static int n_activate_other_event_cb=0;
static void
write_a_byte_cb(evutil_socket_t fd, short what, void *arg)
{
	char buf[] = "x";
	if (write(fd, buf, 1) == 1)
		++n_write_a_byte_cb;
}
static void
read_and_drain_cb(evutil_socket_t fd, short what, void *arg)
{
	char buf[128];
	int n;
	++n_read_and_drain_cb;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;
}

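/* Descriptive note (added comment): this helper is used by test_active_later()
 * below.  Each call re-schedules the "other" event with event_active_later_(),
 * so the two events keep handing activation back and forth while the loop also
 * services the read/write events on the socketpair. */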
static void
activate_other_event_cb(evutil_socket_t fd, short what, void *other_)
{
	struct event *ev_activate = other_;
	++n_activate_other_event_cb;
	event_active_later_(ev_activate, EV_READ);
}

static void
test_active_later(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event *ev1 = NULL, *ev2 = NULL;
	struct event ev3, ev4;
	struct timeval qsec = {0, 100000};
	ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL);
	ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL);
	event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4);
	event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3);
	event_add(ev1, NULL);
	event_add(ev2, NULL);
	event_active_later_(&ev3, EV_READ);

	event_base_loopexit(data->base, &qsec);

	event_base_loop(data->base, 0);

	TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.",
		n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb));
	event_del(&ev3);
	event_del(&ev4);

	tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb);
	tt_int_op(n_write_a_byte_cb, >, 100);
	tt_int_op(n_read_and_drain_cb, >, 100);
	tt_int_op(n_activate_other_event_cb, >, 100);

	event_active_later_(&ev4, EV_READ);
	event_active(&ev4, EV_READ, 1); /* This should make the event
					   active immediately. */
	tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0);
	tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0);

	/* Now leave this one around, so that event_free sees it and removes
	 * it. */
	event_active_later_(&ev3, EV_READ);
	event_base_assert_ok_(data->base);

end:
	if (ev1)
		event_free(ev1);
	if (ev2)
		event_free(ev2);

	event_base_free(data->base);
	data->base = NULL;
}


static void incr_arg_cb(evutil_socket_t fd, short what, void *arg)
{
	int *intptr = arg;
	(void) fd; (void) what;
	++*intptr;
}
static void remove_timers_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event **ep = arg;
	(void) fd; (void) what;
	event_remove_timer(ep[0]);
	event_remove_timer(ep[1]);
}
static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg)
{
	evutil_socket_t *sockp = arg;
	(void) fd; (void) what;
	(void) write(*sockp, "A", 1);
}
struct read_not_timeout_param
{
	struct event **ev;
	int events;
	int count;
};
static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg)
{
	struct read_not_timeout_param *rntp = arg;
	char c;
	ev_ssize_t n;
	(void) fd; (void) what;
	n = read(fd, &c, 1);
	tt_int_op(n, ==, 1);
	rntp->events |= what;
	++rntp->count;
	if(2 == rntp->count) event_del(rntp->ev[0]);
end:
	;
}

static void
test_event_remove_timeout(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event *ev[5];
	int ev1_fired=0;
	struct timeval ms25 = { 0, 25*1000 },
	    ms40 = { 0, 40*1000 },
	    ms75 = { 0, 75*1000 },
	    ms125 = { 0, 125*1000 };
	struct read_not_timeout_param rntp = { ev, 0, 0 };

	event_base_assert_ok_(base);

	ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST,
	    read_not_timeout_cb, &rntp);
	ev[1] = evtimer_new(base, incr_arg_cb, &ev1_fired);
	ev[2] = evtimer_new(base, remove_timers_cb, ev);
	ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
	ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
	tt_assert(base);
	event_add(ev[2], &ms25);	/* remove timers */
	event_add(ev[4], &ms40);	/* write to test if timer re-activates */
	event_add(ev[0], &ms75);	/* read */
	event_add(ev[1], &ms75);	/* timer */
	event_add(ev[3], &ms125);	/* timeout. */
	event_base_assert_ok_(base);

	event_base_dispatch(base);

	tt_int_op(ev1_fired, ==, 0);
	tt_int_op(rntp.events, ==, EV_READ);

	event_base_assert_ok_(base);
end:
	event_free(ev[0]);
	event_free(ev[1]);
	event_free(ev[2]);
	event_free(ev[3]);
	event_free(ev[4]);
}

static void
test_event_base_new(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = 0;
	struct event ev1;
	struct basic_cb_args args;

	int towrite = (int)strlen(TEST1)+1;
	int len = write(data->pair[0], TEST1, towrite);

	if (len < 0)
		tt_abort_perror("initial write");
	else if (len != towrite)
		tt_abort_printf(("initial write fell short (%d of %d bytes)",
				 len, towrite));

	if (shutdown(data->pair[0], SHUT_WR))
		tt_abort_perror("initial write shutdown");

	base = event_base_new();
	if (!base)
		tt_abort_msg("failed to create event base");

	args.eb = base;
	args.ev = &ev1;
	args.callcount = 0;
	event_assign(&ev1, base, data->pair[1],
	    EV_READ|EV_PERSIST, basic_read_cb, &args);

	if (event_add(&ev1, NULL))
		tt_abort_perror("initial event_add");

	if (event_base_loop(base, 0))
		tt_abort_msg("unsuccessful exit from event loop");

end:
	if (base)
		event_base_free(base);
}

static void
test_loopexit(void)
{
	struct timeval tv, tv_start, tv_end;
	struct event ev;

	setup_test("Loop exit: ");

	tv.tv_usec = 0;
	tv.tv_sec = 60*60*24;
	evtimer_set(&ev, timeout_cb, NULL);
	evtimer_add(&ev, &tv);

	tv.tv_usec = 300*1000;
	tv.tv_sec = 0;
	event_loopexit(&tv);

	evutil_gettimeofday(&tv_start, NULL);
	event_dispatch();
	evutil_gettimeofday(&tv_end, NULL);

	evtimer_del(&ev);

	tt_assert(event_base_got_exit(global_base));
	tt_assert(!event_base_got_break(global_base));

	test_timeval_diff_eq(&tv_start, &tv_end, 300);

	test_ok = 1;
end:
	cleanup_test();
}

static void
test_loopexit_multiple(void)
{
	struct timeval tv, tv_start, tv_end;
	struct event_base *base;

	setup_test("Loop Multiple exit: ");

	base = event_base_new();

	tv.tv_usec = 200*1000;
	tv.tv_sec = 0;
	event_base_loopexit(base, &tv);

	tv.tv_usec = 0;
	tv.tv_sec = 3;
	event_base_loopexit(base, &tv);

	evutil_gettimeofday(&tv_start, NULL);
	event_base_dispatch(base);
	evutil_gettimeofday(&tv_end, NULL);

	tt_assert(event_base_got_exit(base));
	tt_assert(!event_base_got_break(base));

	event_base_free(base);

	test_timeval_diff_eq(&tv_start, &tv_end, 200);

	test_ok = 1;

end:
	cleanup_test();
}

static void
break_cb(evutil_socket_t fd, short events, void *arg)
{
	test_ok = 1;
	event_loopbreak();
}

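/* Descriptive note (added comment): companion to break_cb() above.  If the
 * loop keeps dispatching after event_loopbreak(), this callback runs, clears
 * test_ok, and test_loopbreak() fails. */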
static void
fail_cb(evutil_socket_t fd, short events, void *arg)
{
	test_ok = 0;
}

static void
test_loopbreak(void)
{
	struct event ev1, ev2;
	struct timeval tv;

	setup_test("Loop break: ");

	tv.tv_sec = 0;
	tv.tv_usec = 0;
	evtimer_set(&ev1, break_cb, NULL);
	evtimer_add(&ev1, &tv);
	evtimer_set(&ev2, fail_cb, NULL);
	evtimer_add(&ev2, &tv);

	event_dispatch();

	tt_assert(!event_base_got_exit(global_base));
	tt_assert(event_base_got_break(global_base));

	evtimer_del(&ev1);
	evtimer_del(&ev2);

end:
	cleanup_test();
}

static struct event *readd_test_event_last_added = NULL;
static void
re_add_read_cb(evutil_socket_t fd, short event, void *arg)
{
	char buf[256];
	struct event *ev_other = arg;
	ev_ssize_t n_read;

	readd_test_event_last_added = ev_other;

	n_read = read(fd, buf, sizeof(buf));

	if (n_read < 0) {
		tt_fail_perror("read");
		event_base_loopbreak(event_get_base(ev_other));
		return;
	} else {
		event_add(ev_other, NULL);
		++test_ok;
	}
}

static void
test_nonpersist_readd(void)
{
	struct event ev1, ev2;

	setup_test("Re-add nonpersistent events: ");
	event_set(&ev1, pair[0], EV_READ, re_add_read_cb, &ev2);
	event_set(&ev2, pair[1], EV_READ, re_add_read_cb, &ev1);

	if (write(pair[0], "Hello", 5) < 0) {
		tt_fail_perror("write(pair[0])");
	}

	if (write(pair[1], "Hello", 5) < 0) {
		tt_fail_perror("write(pair[1])\n");
	}

	if (event_add(&ev1, NULL) == -1 ||
	    event_add(&ev2, NULL) == -1) {
		test_ok = 0;
	}
	if (test_ok != 0)
		exit(1);
	event_loop(EVLOOP_ONCE);
	if (test_ok != 2)
		exit(1);
	/* At this point, we executed both callbacks.  Whichever one got
	 * called first added the second, but the second then immediately got
	 * deleted before its callback was called.  At this point, though, it
	 * re-added the first.
	 */
	if (!readd_test_event_last_added) {
		test_ok = 0;
	} else if (readd_test_event_last_added == &ev1) {
		if (!event_pending(&ev1, EV_READ, NULL) ||
		    event_pending(&ev2, EV_READ, NULL))
			test_ok = 0;
	} else {
		if (event_pending(&ev1, EV_READ, NULL) ||
		    !event_pending(&ev2, EV_READ, NULL))
			test_ok = 0;
	}

	event_del(&ev1);
	event_del(&ev2);

	cleanup_test();
}

struct test_pri_event {
	struct event ev;
	int count;
};

static void
test_priorities_cb(evutil_socket_t fd, short what, void *arg)
{
	struct test_pri_event *pri = arg;
	struct timeval tv;

	if (pri->count == 3) {
		event_loopexit(NULL);
		return;
	}

	pri->count++;

	evutil_timerclear(&tv);
	event_add(&pri->ev, &tv);
}

static void
test_priorities_impl(int npriorities)
{
	struct test_pri_event one, two;
	struct timeval tv;

	TT_BLATHER(("Testing Priorities %d: ", npriorities));

	event_base_priority_init(global_base, npriorities);

	memset(&one, 0, sizeof(one));
	memset(&two, 0, sizeof(two));

	timeout_set(&one.ev, test_priorities_cb, &one);
	if (event_priority_set(&one.ev, 0) == -1) {
		fprintf(stderr, "%s: failed to set priority", __func__);
		exit(1);
	}

	timeout_set(&two.ev, test_priorities_cb, &two);
	if (event_priority_set(&two.ev, npriorities - 1) == -1) {
		fprintf(stderr, "%s: failed to set priority", __func__);
		exit(1);
	}

	evutil_timerclear(&tv);

	if (event_add(&one.ev, &tv) == -1)
		exit(1);
	if (event_add(&two.ev, &tv) == -1)
		exit(1);

	event_dispatch();

	event_del(&one.ev);
	event_del(&two.ev);

	if (npriorities == 1) {
		if (one.count == 3 && two.count == 3)
			test_ok = 1;
	} else if (npriorities == 2) {
		/* Two is called once because event_loopexit is priority 1 */
		if (one.count == 3 && two.count == 1)
			test_ok = 1;
	} else {
		if (one.count == 3 && two.count == 0)
			test_ok = 1;
	}
}

static void
test_priorities(void)
{
	test_priorities_impl(1);
	if (test_ok)
		test_priorities_impl(2);
	if (test_ok)
		test_priorities_impl(3);
}

/* priority-active-inversion: activate a higher-priority event, and make sure
 * it keeps us from running a lower-priority event first. */
static int n_pai_calls = 0;
static struct event pai_events[3];

static void
prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg)
{
	int *call_order = arg;
	*call_order = n_pai_calls++;
	if (n_pai_calls == 1) {
		/* This should activate later, even though it shares a
		   priority with us. */
		event_active(&pai_events[1], EV_READ, 1);
*/ 2126 event_active(&pai_events[2], EV_TIMEOUT, 1); 2127 } 2128 } 2129 2130 static void 2131 test_priority_active_inversion(void *data_) 2132 { 2133 struct basic_test_data *data = data_; 2134 struct event_base *base = data->base; 2135 int call_order[3]; 2136 int i; 2137 tt_int_op(event_base_priority_init(base, 8), ==, 0); 2138 2139 n_pai_calls = 0; 2140 memset(call_order, 0, sizeof(call_order)); 2141 2142 for (i=0;i<3;++i) { 2143 event_assign(&pai_events[i], data->base, -1, 0, 2144 prio_active_inversion_cb, &call_order[i]); 2145 } 2146 2147 event_priority_set(&pai_events[0], 4); 2148 event_priority_set(&pai_events[1], 4); 2149 event_priority_set(&pai_events[2], 0); 2150 2151 event_active(&pai_events[0], EV_WRITE, 1); 2152 2153 event_base_dispatch(base); 2154 tt_int_op(n_pai_calls, ==, 3); 2155 tt_int_op(call_order[0], ==, 0); 2156 tt_int_op(call_order[1], ==, 2); 2157 tt_int_op(call_order[2], ==, 1); 2158 end: 2159 ; 2160 } 2161 2162 2163 static void 2164 test_multiple_cb(evutil_socket_t fd, short event, void *arg) 2165 { 2166 if (event & EV_READ) 2167 test_ok |= 1; 2168 else if (event & EV_WRITE) 2169 test_ok |= 2; 2170 } 2171 2172 static void 2173 test_multiple_events_for_same_fd(void) 2174 { 2175 struct event e1, e2; 2176 2177 setup_test("Multiple events for same fd: "); 2178 2179 event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL); 2180 event_add(&e1, NULL); 2181 event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL); 2182 event_add(&e2, NULL); 2183 event_loop(EVLOOP_ONCE); 2184 event_del(&e2); 2185 2186 if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) { 2187 tt_fail_perror("write"); 2188 } 2189 2190 event_loop(EVLOOP_ONCE); 2191 event_del(&e1); 2192 2193 if (test_ok != 3) 2194 test_ok = 0; 2195 2196 cleanup_test(); 2197 } 2198 2199 int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf); 2200 int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf); 2201 int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number); 2202 int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf); 2203 2204 static void 2205 read_once_cb(evutil_socket_t fd, short event, void *arg) 2206 { 2207 char buf[256]; 2208 int len; 2209 2210 len = read(fd, buf, sizeof(buf)); 2211 2212 if (called) { 2213 test_ok = 0; 2214 } else if (len) { 2215 /* Assumes global pair[0] can be used for writing */ 2216 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 2217 tt_fail_perror("write"); 2218 test_ok = 0; 2219 } else { 2220 test_ok = 1; 2221 } 2222 } 2223 2224 called++; 2225 } 2226 2227 static void 2228 test_want_only_once(void) 2229 { 2230 struct event ev; 2231 struct timeval tv; 2232 2233 /* Very simple read test */ 2234 setup_test("Want read only once: "); 2235 2236 if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) { 2237 tt_fail_perror("write"); 2238 } 2239 2240 /* Setup the loop termination */ 2241 evutil_timerclear(&tv); 2242 tv.tv_usec = 300*1000; 2243 event_loopexit(&tv); 2244 2245 event_set(&ev, pair[1], EV_READ, read_once_cb, &ev); 2246 if (event_add(&ev, NULL) == -1) 2247 exit(1); 2248 event_dispatch(); 2249 2250 cleanup_test(); 2251 } 2252 2253 #define TEST_MAX_INT 6 2254 2255 static void 2256 evtag_int_test(void *ptr) 2257 { 2258 struct evbuffer *tmp = evbuffer_new(); 2259 ev_uint32_t integers[TEST_MAX_INT] = { 2260 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000 2261 }; 2262 ev_uint32_t integer; 2263 ev_uint64_t big_int; 2264 int i; 2265 2266 evtag_init(); 2267 2268 for (i = 0; i < TEST_MAX_INT; i++) { 2269 int oldlen, newlen; 2270 oldlen = (int)EVBUFFER_LENGTH(tmp); 
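		/* Encode the 32-bit value, then a 64-bit companion scaled by
		 * one billion; both are decoded and verified in the second
		 * loop below. */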
2271 evtag_encode_int(tmp, integers[i]); 2272 newlen = (int)EVBUFFER_LENGTH(tmp); 2273 TT_BLATHER(("encoded 0x%08x with %d bytes", 2274 (unsigned)integers[i], newlen - oldlen)); 2275 big_int = integers[i]; 2276 big_int *= 1000000000; /* 1 billion */ 2277 evtag_encode_int64(tmp, big_int); 2278 } 2279 2280 for (i = 0; i < TEST_MAX_INT; i++) { 2281 tt_int_op(evtag_decode_int(&integer, tmp), !=, -1); 2282 tt_uint_op(integer, ==, integers[i]); 2283 tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1); 2284 tt_assert((big_int / 1000000000) == integers[i]); 2285 } 2286 2287 tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0); 2288 end: 2289 evbuffer_free(tmp); 2290 } 2291 2292 static void 2293 evtag_fuzz(void *ptr) 2294 { 2295 u_char buffer[4096]; 2296 struct evbuffer *tmp = evbuffer_new(); 2297 struct timeval tv; 2298 int i, j; 2299 2300 int not_failed = 0; 2301 2302 evtag_init(); 2303 2304 for (j = 0; j < 100; j++) { 2305 for (i = 0; i < (int)sizeof(buffer); i++) 2306 buffer[i] = test_weakrand(); 2307 evbuffer_drain(tmp, -1); 2308 evbuffer_add(tmp, buffer, sizeof(buffer)); 2309 2310 if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) 2311 not_failed++; 2312 } 2313 2314 /* The majority of decodes should fail */ 2315 tt_int_op(not_failed, <, 10); 2316 2317 /* Now insert some corruption into the tag length field */ 2318 evbuffer_drain(tmp, -1); 2319 evutil_timerclear(&tv); 2320 tv.tv_sec = 1; 2321 evtag_marshal_timeval(tmp, 0, &tv); 2322 evbuffer_add(tmp, buffer, sizeof(buffer)); 2323 2324 ((char *)EVBUFFER_DATA(tmp))[1] = '\xff'; 2325 if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) { 2326 tt_abort_msg("evtag_unmarshal_timeval should have failed"); 2327 } 2328 2329 end: 2330 evbuffer_free(tmp); 2331 } 2332 2333 static void 2334 evtag_tag_encoding(void *ptr) 2335 { 2336 struct evbuffer *tmp = evbuffer_new(); 2337 ev_uint32_t integers[TEST_MAX_INT] = { 2338 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000 2339 }; 2340 ev_uint32_t integer; 2341 int i; 2342 2343 evtag_init(); 2344 2345 for (i = 0; i < TEST_MAX_INT; i++) { 2346 int oldlen, newlen; 2347 oldlen = (int)EVBUFFER_LENGTH(tmp); 2348 evtag_encode_tag(tmp, integers[i]); 2349 newlen = (int)EVBUFFER_LENGTH(tmp); 2350 TT_BLATHER(("encoded 0x%08x with %d bytes", 2351 (unsigned)integers[i], newlen - oldlen)); 2352 } 2353 2354 for (i = 0; i < TEST_MAX_INT; i++) { 2355 tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1); 2356 tt_uint_op(integer, ==, integers[i]); 2357 } 2358 2359 tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0); 2360 2361 end: 2362 evbuffer_free(tmp); 2363 } 2364 2365 static void 2366 evtag_test_peek(void *ptr) 2367 { 2368 struct evbuffer *tmp = evbuffer_new(); 2369 ev_uint32_t u32; 2370 2371 evtag_marshal_int(tmp, 30, 0); 2372 evtag_marshal_string(tmp, 40, "Hello world"); 2373 2374 tt_int_op(evtag_peek(tmp, &u32), ==, 1); 2375 tt_int_op(u32, ==, 30); 2376 tt_int_op(evtag_peek_length(tmp, &u32), ==, 0); 2377 tt_int_op(u32, ==, 1+1+1); 2378 tt_int_op(evtag_consume(tmp), ==, 0); 2379 2380 tt_int_op(evtag_peek(tmp, &u32), ==, 1); 2381 tt_int_op(u32, ==, 40); 2382 tt_int_op(evtag_peek_length(tmp, &u32), ==, 0); 2383 tt_int_op(u32, ==, 1+1+11); 2384 tt_int_op(evtag_payload_length(tmp, &u32), ==, 0); 2385 tt_int_op(u32, ==, 11); 2386 2387 end: 2388 evbuffer_free(tmp); 2389 } 2390 2391 2392 static void 2393 test_methods(void *ptr) 2394 { 2395 const char **methods = event_get_supported_methods(); 2396 struct event_config *cfg = NULL; 2397 struct event_base *base = NULL; 2398 const char *backend; 2399 int n_methods = 0; 2400 2401 tt_assert(methods); 2402 2403 backend = 
methods[0]; 2404 while (*methods != NULL) { 2405 TT_BLATHER(("Support method: %s", *methods)); 2406 ++methods; 2407 ++n_methods; 2408 } 2409 2410 cfg = event_config_new(); 2411 assert(cfg != NULL); 2412 2413 tt_int_op(event_config_avoid_method(cfg, backend), ==, 0); 2414 event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV); 2415 2416 base = event_base_new_with_config(cfg); 2417 if (n_methods > 1) { 2418 tt_assert(base); 2419 tt_str_op(backend, !=, event_base_get_method(base)); 2420 } else { 2421 tt_assert(base == NULL); 2422 } 2423 2424 end: 2425 if (base) 2426 event_base_free(base); 2427 if (cfg) 2428 event_config_free(cfg); 2429 } 2430 2431 static void 2432 test_version(void *arg) 2433 { 2434 const char *vstr; 2435 ev_uint32_t vint; 2436 int major, minor, patch, n; 2437 2438 vstr = event_get_version(); 2439 vint = event_get_version_number(); 2440 2441 tt_assert(vstr); 2442 tt_assert(vint); 2443 2444 tt_str_op(vstr, ==, LIBEVENT_VERSION); 2445 tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER); 2446 2447 n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch); 2448 tt_assert(3 == n); 2449 tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8))); 2450 end: 2451 ; 2452 } 2453 2454 static void 2455 test_base_features(void *arg) 2456 { 2457 struct event_base *base = NULL; 2458 struct event_config *cfg = NULL; 2459 2460 cfg = event_config_new(); 2461 2462 tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET)); 2463 2464 base = event_base_new_with_config(cfg); 2465 if (base) { 2466 tt_int_op(EV_FEATURE_ET, ==, 2467 event_base_get_features(base) & EV_FEATURE_ET); 2468 } else { 2469 base = event_base_new(); 2470 tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET); 2471 } 2472 2473 end: 2474 if (base) 2475 event_base_free(base); 2476 if (cfg) 2477 event_config_free(cfg); 2478 } 2479 2480 #ifdef EVENT__HAVE_SETENV 2481 #define SETENV_OK 2482 #elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV) 2483 static void setenv(const char *k, const char *v, int o_) 2484 { 2485 char b[256]; 2486 evutil_snprintf(b, sizeof(b), "%s=%s",k,v); 2487 putenv(b); 2488 } 2489 #define SETENV_OK 2490 #endif 2491 2492 #ifdef EVENT__HAVE_UNSETENV 2493 #define UNSETENV_OK 2494 #elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV) 2495 static void unsetenv(const char *k) 2496 { 2497 char b[256]; 2498 evutil_snprintf(b, sizeof(b), "%s=",k); 2499 putenv(b); 2500 } 2501 #define UNSETENV_OK 2502 #endif 2503 2504 #if defined(SETENV_OK) && defined(UNSETENV_OK) 2505 static void 2506 methodname_to_envvar(const char *mname, char *buf, size_t buflen) 2507 { 2508 char *cp; 2509 evutil_snprintf(buf, buflen, "EVENT_NO%s", mname); 2510 for (cp = buf; *cp; ++cp) { 2511 *cp = EVUTIL_TOUPPER_(*cp); 2512 } 2513 } 2514 #endif 2515 2516 static void 2517 test_base_environ(void *arg) 2518 { 2519 struct event_base *base = NULL; 2520 struct event_config *cfg = NULL; 2521 2522 #if defined(SETENV_OK) && defined(UNSETENV_OK) 2523 const char **basenames; 2524 int i, n_methods=0; 2525 char varbuf[128]; 2526 const char *defaultname, *ignoreenvname; 2527 2528 /* See if unsetenv works before we rely on it. 
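	 * (The putenv()-based fallback above only sets the variable to an
	 *  empty string, so getenv() may still find it afterwards.)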
*/ 2529 setenv("EVENT_NOWAFFLES", "1", 1); 2530 unsetenv("EVENT_NOWAFFLES"); 2531 if (getenv("EVENT_NOWAFFLES") != NULL) { 2532 #ifndef EVENT__HAVE_UNSETENV 2533 TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test")); 2534 #else 2535 TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test")); 2536 #endif 2537 tt_skip(); 2538 } 2539 2540 basenames = event_get_supported_methods(); 2541 for (i = 0; basenames[i]; ++i) { 2542 methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf)); 2543 unsetenv(varbuf); 2544 ++n_methods; 2545 } 2546 2547 base = event_base_new(); 2548 tt_assert(base); 2549 2550 defaultname = event_base_get_method(base); 2551 TT_BLATHER(("default is <%s>", defaultname)); 2552 event_base_free(base); 2553 base = NULL; 2554 2555 /* Can we disable the method with EVENT_NOfoo ? */ 2556 if (!strcmp(defaultname, "epoll (with changelist)")) { 2557 setenv("EVENT_NOEPOLL", "1", 1); 2558 ignoreenvname = "epoll"; 2559 } else { 2560 methodname_to_envvar(defaultname, varbuf, sizeof(varbuf)); 2561 setenv(varbuf, "1", 1); 2562 ignoreenvname = defaultname; 2563 } 2564 2565 /* Use an empty cfg rather than NULL so a failure doesn't exit() */ 2566 cfg = event_config_new(); 2567 base = event_base_new_with_config(cfg); 2568 event_config_free(cfg); 2569 cfg = NULL; 2570 if (n_methods == 1) { 2571 tt_assert(!base); 2572 } else { 2573 tt_assert(base); 2574 tt_str_op(defaultname, !=, event_base_get_method(base)); 2575 event_base_free(base); 2576 base = NULL; 2577 } 2578 2579 /* Can we disable looking at the environment with IGNORE_ENV ? */ 2580 cfg = event_config_new(); 2581 event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV); 2582 base = event_base_new_with_config(cfg); 2583 tt_assert(base); 2584 tt_str_op(ignoreenvname, ==, event_base_get_method(base)); 2585 #else 2586 tt_skip(); 2587 #endif 2588 2589 end: 2590 if (base) 2591 event_base_free(base); 2592 if (cfg) 2593 event_config_free(cfg); 2594 } 2595 2596 static void 2597 read_called_once_cb(evutil_socket_t fd, short event, void *arg) 2598 { 2599 tt_int_op(event, ==, EV_READ); 2600 called += 1; 2601 end: 2602 ; 2603 } 2604 2605 static void 2606 timeout_called_once_cb(evutil_socket_t fd, short event, void *arg) 2607 { 2608 tt_int_op(event, ==, EV_TIMEOUT); 2609 called += 100; 2610 end: 2611 ; 2612 } 2613 2614 static void 2615 immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg) 2616 { 2617 tt_int_op(event, ==, EV_TIMEOUT); 2618 called += 1000; 2619 end: 2620 ; 2621 } 2622 2623 static void 2624 test_event_once(void *ptr) 2625 { 2626 struct basic_test_data *data = ptr; 2627 struct timeval tv; 2628 int r; 2629 2630 tv.tv_sec = 0; 2631 tv.tv_usec = 50*1000; 2632 called = 0; 2633 r = event_base_once(data->base, data->pair[0], EV_READ, 2634 read_called_once_cb, NULL, NULL); 2635 tt_int_op(r, ==, 0); 2636 r = event_base_once(data->base, -1, EV_TIMEOUT, 2637 timeout_called_once_cb, NULL, &tv); 2638 tt_int_op(r, ==, 0); 2639 r = event_base_once(data->base, -1, 0, NULL, NULL, NULL); 2640 tt_int_op(r, <, 0); 2641 r = event_base_once(data->base, -1, EV_TIMEOUT, 2642 immediate_called_twice_cb, NULL, NULL); 2643 tt_int_op(r, ==, 0); 2644 tv.tv_sec = 0; 2645 tv.tv_usec = 0; 2646 r = event_base_once(data->base, -1, EV_TIMEOUT, 2647 immediate_called_twice_cb, NULL, &tv); 2648 tt_int_op(r, ==, 0); 2649 2650 if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) { 2651 tt_fail_perror("write"); 2652 } 2653 2654 shutdown(data->pair[1], SHUT_WR); 2655 2656 event_base_dispatch(data->base); 2657 2658 tt_int_op(called, ==, 2101); 2659 end: 2660 ; 
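	/* The expected total above is 2101: 1 from read_called_once_cb, 100
	 * from timeout_called_once_cb, and 2*1000 from the two
	 * immediate_called_twice_cb invocations scheduled above. */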
2661 } 2662 2663 static void 2664 test_event_once_never(void *ptr) 2665 { 2666 struct basic_test_data *data = ptr; 2667 struct timeval tv; 2668 2669 /* Have one trigger in 10 seconds (don't worry, because) */ 2670 tv.tv_sec = 10; 2671 tv.tv_usec = 0; 2672 called = 0; 2673 event_base_once(data->base, -1, EV_TIMEOUT, 2674 timeout_called_once_cb, NULL, &tv); 2675 2676 /* But shut down the base in 75 msec. */ 2677 tv.tv_sec = 0; 2678 tv.tv_usec = 75*1000; 2679 event_base_loopexit(data->base, &tv); 2680 2681 event_base_dispatch(data->base); 2682 2683 tt_int_op(called, ==, 0); 2684 end: 2685 ; 2686 } 2687 2688 static void 2689 test_event_pending(void *ptr) 2690 { 2691 struct basic_test_data *data = ptr; 2692 struct event *r=NULL, *w=NULL, *t=NULL; 2693 struct timeval tv, now, tv2; 2694 2695 tv.tv_sec = 0; 2696 tv.tv_usec = 500 * 1000; 2697 r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb, 2698 NULL); 2699 w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb, 2700 NULL); 2701 t = evtimer_new(data->base, timeout_cb, NULL); 2702 2703 tt_assert(r); 2704 tt_assert(w); 2705 tt_assert(t); 2706 2707 evutil_gettimeofday(&now, NULL); 2708 event_add(r, NULL); 2709 event_add(t, &tv); 2710 2711 tt_assert( event_pending(r, EV_READ, NULL)); 2712 tt_assert(!event_pending(w, EV_WRITE, NULL)); 2713 tt_assert(!event_pending(r, EV_WRITE, NULL)); 2714 tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL)); 2715 tt_assert(!event_pending(r, EV_TIMEOUT, NULL)); 2716 tt_assert( event_pending(t, EV_TIMEOUT, NULL)); 2717 tt_assert( event_pending(t, EV_TIMEOUT, &tv2)); 2718 2719 tt_assert(evutil_timercmp(&tv2, &now, >)); 2720 2721 test_timeval_diff_eq(&now, &tv2, 500); 2722 2723 end: 2724 if (r) { 2725 event_del(r); 2726 event_free(r); 2727 } 2728 if (w) { 2729 event_del(w); 2730 event_free(w); 2731 } 2732 if (t) { 2733 event_del(t); 2734 event_free(t); 2735 } 2736 } 2737 2738 #ifndef _WIN32 2739 /* You can't do this test on windows, since dup2 doesn't work on sockets */ 2740 2741 static void 2742 dfd_cb(evutil_socket_t fd, short e, void *data) 2743 { 2744 *(int*)data = (int)e; 2745 } 2746 2747 /* Regression test for our workaround for a fun epoll/linux related bug 2748 * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2) 2749 * will get you an EEXIST */ 2750 static void 2751 test_dup_fd(void *arg) 2752 { 2753 struct basic_test_data *data = arg; 2754 struct event_base *base = data->base; 2755 struct event *ev1=NULL, *ev2=NULL; 2756 int fd, dfd=-1; 2757 int ev1_got, ev2_got; 2758 2759 tt_int_op(write(data->pair[0], "Hello world", 2760 strlen("Hello world")), >, 0); 2761 fd = data->pair[1]; 2762 2763 dfd = dup(fd); 2764 tt_int_op(dfd, >=, 0); 2765 2766 ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb, &ev1_got); 2767 ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got); 2768 ev1_got = ev2_got = 0; 2769 event_add(ev1, NULL); 2770 event_add(ev2, NULL); 2771 event_base_loop(base, EVLOOP_ONCE); 2772 tt_int_op(ev1_got, ==, EV_READ); 2773 tt_int_op(ev2_got, ==, EV_READ); 2774 2775 /* Now close and delete dfd then dispatch. We need to do the 2776 * dispatch here so that when we add it later, we think there 2777 * was an intermediate delete. */ 2778 close(dfd); 2779 event_del(ev2); 2780 ev1_got = ev2_got = 0; 2781 event_base_loop(base, EVLOOP_ONCE); 2782 tt_want_int_op(ev1_got, ==, EV_READ); 2783 tt_int_op(ev2_got, ==, 0); 2784 2785 /* Re-duplicate the fd. We need to get the same duplicated 2786 * value that we closed to provoke the epoll quirk. 
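	 * (dup2() lets us reuse that exact descriptor number.)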
Also, we 2787 * need to change the events to write, or else the old lingering 2788 * read event will make the test pass whether the change was 2789 * successful or not. */ 2790 tt_int_op(dup2(fd, dfd), ==, dfd); 2791 event_free(ev2); 2792 ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got); 2793 event_add(ev2, NULL); 2794 ev1_got = ev2_got = 0; 2795 event_base_loop(base, EVLOOP_ONCE); 2796 tt_want_int_op(ev1_got, ==, EV_READ); 2797 tt_int_op(ev2_got, ==, EV_WRITE); 2798 2799 end: 2800 if (ev1) 2801 event_free(ev1); 2802 if (ev2) 2803 event_free(ev2); 2804 if (dfd >= 0) 2805 close(dfd); 2806 } 2807 #endif 2808 2809 #ifdef EVENT__DISABLE_MM_REPLACEMENT 2810 static void 2811 test_mm_functions(void *arg) 2812 { 2813 tinytest_set_test_skipped_(); 2814 } 2815 #else 2816 static int 2817 check_dummy_mem_ok(void *mem_) 2818 { 2819 char *mem = mem_; 2820 mem -= 16; 2821 return !memcmp(mem, "{[<guardedram>]}", 16); 2822 } 2823 2824 static void * 2825 dummy_malloc(size_t len) 2826 { 2827 char *mem = malloc(len+16); 2828 if (mem == NULL) { 2829 fprintf(stderr, "Unable to allocate memory in dummy_malloc()\n"); 2830 return NULL; 2831 } 2832 memcpy(mem, "{[<guardedram>]}", 16); 2833 return mem+16; 2834 } 2835 2836 static void * 2837 dummy_realloc(void *mem_, size_t len) 2838 { 2839 char *mem = mem_; 2840 if (!mem) 2841 return dummy_malloc(len); 2842 tt_want(check_dummy_mem_ok(mem_)); 2843 mem -= 16; 2844 mem = realloc(mem, len+16); 2845 return mem+16; 2846 } 2847 2848 static void 2849 dummy_free(void *mem_) 2850 { 2851 char *mem = mem_; 2852 tt_want(check_dummy_mem_ok(mem_)); 2853 mem -= 16; 2854 free(mem); 2855 } 2856 2857 static void 2858 test_mm_functions(void *arg) 2859 { 2860 struct event_base *b = NULL; 2861 struct event_config *cfg = NULL; 2862 event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free); 2863 cfg = event_config_new(); 2864 event_config_avoid_method(cfg, "Nonesuch"); 2865 b = event_base_new_with_config(cfg); 2866 tt_assert(b); 2867 tt_assert(check_dummy_mem_ok(b)); 2868 end: 2869 if (cfg) 2870 event_config_free(cfg); 2871 if (b) 2872 event_base_free(b); 2873 } 2874 #endif 2875 2876 static void 2877 many_event_cb(evutil_socket_t fd, short event, void *arg) 2878 { 2879 int *calledp = arg; 2880 *calledp += 1; 2881 } 2882 2883 static void 2884 test_many_events(void *arg) 2885 { 2886 /* Try 70 events that should all be ready at once. This will 2887 * exercise the "resize" code on most of the backends, and will make 2888 * sure that we can get past the 64-handle limit of some windows 2889 * functions. */ 2890 #define MANY 70 2891 2892 struct basic_test_data *data = arg; 2893 struct event_base *base = data->base; 2894 int one_at_a_time = data->setup_data != NULL; 2895 evutil_socket_t sock[MANY]; 2896 struct event *ev[MANY]; 2897 int called[MANY]; 2898 int i; 2899 int loopflags = EVLOOP_NONBLOCK, evflags=0; 2900 if (one_at_a_time) { 2901 loopflags |= EVLOOP_ONCE; 2902 evflags = EV_PERSIST; 2903 } 2904 2905 memset(sock, 0xff, sizeof(sock)); 2906 memset(ev, 0, sizeof(ev)); 2907 memset(called, 0, sizeof(called)); 2908 2909 for (i = 0; i < MANY; ++i) { 2910 /* We need an event that will hit the backend, and that will 2911 * be ready immediately. "Send a datagram" is an easy 2912 * instance of that. 
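		 * (An unconnected UDP socket reports writable immediately, so
		 *  EV_WRITE fires without needing any peer.)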
*/ 2913 sock[i] = socket(AF_INET, SOCK_DGRAM, 0); 2914 tt_assert(sock[i] >= 0); 2915 called[i] = 0; 2916 ev[i] = event_new(base, sock[i], EV_WRITE|evflags, 2917 many_event_cb, &called[i]); 2918 event_add(ev[i], NULL); 2919 if (one_at_a_time) 2920 event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE); 2921 } 2922 2923 event_base_loop(base, loopflags); 2924 2925 for (i = 0; i < MANY; ++i) { 2926 if (one_at_a_time) 2927 tt_int_op(called[i], ==, MANY - i + 1); 2928 else 2929 tt_int_op(called[i], ==, 1); 2930 } 2931 2932 end: 2933 for (i = 0; i < MANY; ++i) { 2934 if (ev[i]) 2935 event_free(ev[i]); 2936 if (sock[i] >= 0) 2937 evutil_closesocket(sock[i]); 2938 } 2939 #undef MANY 2940 } 2941 2942 static void 2943 test_struct_event_size(void *arg) 2944 { 2945 tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event)); 2946 end: 2947 ; 2948 } 2949 2950 static void 2951 test_get_assignment(void *arg) 2952 { 2953 struct basic_test_data *data = arg; 2954 struct event_base *base = data->base; 2955 struct event *ev1 = NULL; 2956 const char *str = "foo"; 2957 2958 struct event_base *b; 2959 evutil_socket_t s; 2960 short what; 2961 event_callback_fn cb; 2962 void *cb_arg; 2963 2964 ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb, (void*)str); 2965 event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg); 2966 2967 tt_ptr_op(b, ==, base); 2968 tt_int_op(s, ==, data->pair[1]); 2969 tt_int_op(what, ==, EV_READ); 2970 tt_ptr_op(cb, ==, dummy_read_cb); 2971 tt_ptr_op(cb_arg, ==, str); 2972 2973 /* Now make sure this doesn't crash. */ 2974 event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL); 2975 2976 end: 2977 if (ev1) 2978 event_free(ev1); 2979 } 2980 2981 struct foreach_helper { 2982 int count; 2983 const struct event *ev; 2984 }; 2985 2986 static int 2987 foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg) 2988 { 2989 struct foreach_helper *h = event_get_callback_arg(ev); 2990 struct timeval *tv = arg; 2991 if (event_get_callback(ev) != timeout_cb) 2992 return 0; 2993 tt_ptr_op(event_get_base(ev), ==, base); 2994 tt_int_op(tv->tv_sec, ==, 10); 2995 h->ev = ev; 2996 h->count++; 2997 return 0; 2998 end: 2999 return -1; 3000 } 3001 3002 static int 3003 foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg) 3004 { 3005 const struct event **ev_out = arg; 3006 struct foreach_helper *h = event_get_callback_arg(ev); 3007 if (event_get_callback(ev) != timeout_cb) 3008 return 0; 3009 if (h->count == 99) { 3010 *ev_out = ev; 3011 return 101; 3012 } 3013 return 0; 3014 } 3015 3016 static void 3017 test_event_foreach(void *arg) 3018 { 3019 struct basic_test_data *data = arg; 3020 struct event_base *base = data->base; 3021 struct event *ev[5]; 3022 struct foreach_helper visited[5]; 3023 int i; 3024 struct timeval ten_sec = {10,0}; 3025 const struct event *ev_found = NULL; 3026 3027 for (i = 0; i < 5; ++i) { 3028 visited[i].count = 0; 3029 visited[i].ev = NULL; 3030 ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]); 3031 } 3032 3033 tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL)); 3034 tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL)); 3035 3036 event_add(ev[0], &ten_sec); 3037 event_add(ev[1], &ten_sec); 3038 event_active(ev[1], EV_TIMEOUT, 1); 3039 event_active(ev[2], EV_TIMEOUT, 1); 3040 event_add(ev[3], &ten_sec); 3041 /* Don't touch ev[4]. 
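	 * It is never added or activated, so the foreach calls below should
	 * not visit it; only ev[0]..ev[3] are checked.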
*/ 3042 3043 tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb, 3044 &ten_sec)); 3045 tt_int_op(1, ==, visited[0].count); 3046 tt_int_op(1, ==, visited[1].count); 3047 tt_int_op(1, ==, visited[2].count); 3048 tt_int_op(1, ==, visited[3].count); 3049 tt_ptr_op(ev[0], ==, visited[0].ev); 3050 tt_ptr_op(ev[1], ==, visited[1].ev); 3051 tt_ptr_op(ev[2], ==, visited[2].ev); 3052 tt_ptr_op(ev[3], ==, visited[3].ev); 3053 3054 visited[2].count = 99; 3055 tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb, 3056 &ev_found)); 3057 tt_ptr_op(ev_found, ==, ev[2]); 3058 3059 end: 3060 for (i=0; i<5; ++i) { 3061 event_free(ev[i]); 3062 } 3063 } 3064 3065 static struct event_base *cached_time_base = NULL; 3066 static int cached_time_reset = 0; 3067 static int cached_time_sleep = 0; 3068 static void 3069 cache_time_cb(evutil_socket_t fd, short what, void *arg) 3070 { 3071 struct timeval *tv = arg; 3072 tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv)); 3073 if (cached_time_sleep) { 3074 struct timeval delay = { 0, 30*1000 }; 3075 evutil_usleep_(&delay); 3076 } 3077 if (cached_time_reset) { 3078 event_base_update_cache_time(cached_time_base); 3079 } 3080 end: 3081 ; 3082 } 3083 3084 static void 3085 test_gettimeofday_cached(void *arg) 3086 { 3087 struct basic_test_data *data = arg; 3088 struct event_config *cfg = NULL; 3089 struct event_base *base = NULL; 3090 struct timeval tv1, tv2, tv3, now; 3091 struct event *ev1=NULL, *ev2=NULL, *ev3=NULL; 3092 int cached_time_disable = strstr(data->setup_data, "disable") != NULL; 3093 3094 cfg = event_config_new(); 3095 if (cached_time_disable) { 3096 event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME); 3097 } 3098 cached_time_base = base = event_base_new_with_config(cfg); 3099 tt_assert(base); 3100 3101 /* Try gettimeofday_cached outside of an event loop. */ 3102 evutil_gettimeofday(&now, NULL); 3103 tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1)); 3104 tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2)); 3105 tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10); 3106 tt_int_op(timeval_msec_diff(&tv1, &now), <, 10); 3107 3108 cached_time_reset = strstr(data->setup_data, "reset") != NULL; 3109 cached_time_sleep = strstr(data->setup_data, "sleep") != NULL; 3110 3111 ev1 = event_new(base, -1, 0, cache_time_cb, &tv1); 3112 ev2 = event_new(base, -1, 0, cache_time_cb, &tv2); 3113 ev3 = event_new(base, -1, 0, cache_time_cb, &tv3); 3114 3115 event_active(ev1, EV_TIMEOUT, 1); 3116 event_active(ev2, EV_TIMEOUT, 1); 3117 event_active(ev3, EV_TIMEOUT, 1); 3118 3119 event_base_dispatch(base); 3120 3121 if (cached_time_reset && cached_time_sleep) { 3122 tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10); 3123 tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10); 3124 } else if (cached_time_disable && cached_time_sleep) { 3125 tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10); 3126 tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10); 3127 } else if (! 
cached_time_disable) { 3128 tt_assert(evutil_timercmp(&tv1, &tv2, ==)); 3129 tt_assert(evutil_timercmp(&tv2, &tv3, ==)); 3130 } 3131 3132 end: 3133 if (ev1) 3134 event_free(ev1); 3135 if (ev2) 3136 event_free(ev2); 3137 if (ev3) 3138 event_free(ev3); 3139 if (base) 3140 event_base_free(base); 3141 if (cfg) 3142 event_config_free(cfg); 3143 } 3144 3145 static void 3146 tabf_cb(evutil_socket_t fd, short what, void *arg) 3147 { 3148 int *ptr = arg; 3149 *ptr = what; 3150 *ptr += 0x10000; 3151 } 3152 3153 static void 3154 test_active_by_fd(void *arg) 3155 { 3156 struct basic_test_data *data = arg; 3157 struct event_base *base = data->base; 3158 struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL; 3159 int e1,e2,e3,e4; 3160 #ifndef _WIN32 3161 struct event *evsig = NULL; 3162 int es; 3163 #endif 3164 struct timeval tenmin = { 600, 0 }; 3165 3166 /* Ensure no crash on nonexistent FD. */ 3167 event_base_active_by_fd(base, 1000, EV_READ); 3168 3169 /* Ensure no crash on bogus FD. */ 3170 event_base_active_by_fd(base, -1, EV_READ); 3171 3172 /* Ensure no crash on nonexistent/bogus signal. */ 3173 event_base_active_by_signal(base, 1000); 3174 event_base_active_by_signal(base, -1); 3175 3176 event_base_assert_ok_(base); 3177 3178 e1 = e2 = e3 = e4 = 0; 3179 ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1); 3180 ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2); 3181 ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3); 3182 ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4); 3183 tt_assert(ev1); 3184 tt_assert(ev2); 3185 tt_assert(ev3); 3186 tt_assert(ev4); 3187 #ifndef _WIN32 3188 evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es); 3189 tt_assert(evsig); 3190 event_add(evsig, &tenmin); 3191 #endif 3192 3193 event_add(ev1, &tenmin); 3194 event_add(ev2, NULL); 3195 event_add(ev3, NULL); 3196 event_add(ev4, &tenmin); 3197 3198 3199 event_base_assert_ok_(base); 3200 3201 /* Trigger 2, 3, 4 */ 3202 event_base_active_by_fd(base, data->pair[0], EV_WRITE); 3203 event_base_active_by_fd(base, data->pair[1], EV_READ); 3204 #ifndef _WIN32 3205 event_base_active_by_signal(base, SIGHUP); 3206 #endif 3207 3208 event_base_assert_ok_(base); 3209 3210 event_base_loop(base, EVLOOP_ONCE); 3211 3212 tt_int_op(e1, ==, 0); 3213 tt_int_op(e2, ==, EV_WRITE | 0x10000); 3214 tt_int_op(e3, ==, EV_READ | 0x10000); 3215 /* Mask out EV_WRITE here, since it could be genuinely writeable. 
*/ 3216 tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | 0x10000); 3217 #ifndef _WIN32 3218 tt_int_op(es, ==, EV_SIGNAL | 0x10000); 3219 #endif 3220 3221 end: 3222 if (ev1) 3223 event_free(ev1); 3224 if (ev2) 3225 event_free(ev2); 3226 if (ev3) 3227 event_free(ev3); 3228 if (ev4) 3229 event_free(ev4); 3230 #ifndef _WIN32 3231 if (evsig) 3232 event_free(evsig); 3233 #endif 3234 } 3235 3236 struct testcase_t main_testcases[] = { 3237 /* Some converted-over tests */ 3238 { "methods", test_methods, TT_FORK, NULL, NULL }, 3239 { "version", test_version, 0, NULL, NULL }, 3240 BASIC(base_features, TT_FORK|TT_NO_LOGS), 3241 { "base_environ", test_base_environ, TT_FORK, NULL, NULL }, 3242 3243 BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR), 3244 BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR), 3245 3246 BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE), 3247 BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE), 3248 BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE), 3249 BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE), 3250 BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE), 3251 3252 BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS), 3253 BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS), 3254 BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), 3255 BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), 3256 3257 /* These are still using the old API */ 3258 LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE), 3259 { "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, 3260 { "persistent_active_timeout", test_persistent_active_timeout, 3261 TT_FORK|TT_NEED_BASE, &basic_setup, NULL }, 3262 LEGACY(priorities, TT_FORK|TT_NEED_BASE), 3263 BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE), 3264 { "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE, 3265 &basic_setup, NULL }, 3266 3267 /* These legacy tests may not all need all of these flags. 
*/ 3268 LEGACY(simpleread, TT_ISOLATED), 3269 LEGACY(simpleread_multiple, TT_ISOLATED), 3270 LEGACY(simplewrite, TT_ISOLATED), 3271 { "simpleclose", test_simpleclose, TT_FORK, &basic_setup, 3272 NULL }, 3273 LEGACY(multiple, TT_ISOLATED), 3274 LEGACY(persistent, TT_ISOLATED), 3275 LEGACY(combined, TT_ISOLATED), 3276 LEGACY(simpletimeout, TT_ISOLATED), 3277 LEGACY(loopbreak, TT_ISOLATED), 3278 LEGACY(loopexit, TT_ISOLATED), 3279 LEGACY(loopexit_multiple, TT_ISOLATED), 3280 LEGACY(nonpersist_readd, TT_ISOLATED), 3281 LEGACY(multiple_events_for_same_fd, TT_ISOLATED), 3282 LEGACY(want_only_once, TT_ISOLATED), 3283 { "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL }, 3284 { "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL }, 3285 { "event_pending", test_event_pending, TT_ISOLATED, &basic_setup, 3286 NULL }, 3287 #ifndef _WIN32 3288 { "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL }, 3289 #endif 3290 { "mm_functions", test_mm_functions, TT_FORK, NULL, NULL }, 3291 { "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL }, 3292 { "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 }, 3293 3294 { "struct_event_size", test_struct_event_size, 0, NULL, NULL }, 3295 BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), 3296 3297 BASIC(event_foreach, TT_FORK|TT_NEED_BASE), 3298 { "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"" }, 3299 { "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep" }, 3300 { "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep reset" }, 3301 { "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep disable" }, 3302 { "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"disable" }, 3303 3304 BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR), 3305 3306 #ifndef _WIN32 3307 LEGACY(fork, TT_ISOLATED), 3308 #endif 3309 END_OF_TESTCASES 3310 }; 3311 3312 struct testcase_t evtag_testcases[] = { 3313 { "int", evtag_int_test, TT_FORK, NULL, NULL }, 3314 { "fuzz", evtag_fuzz, TT_FORK, NULL, NULL }, 3315 { "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL }, 3316 { "peek", evtag_test_peek, 0, NULL, NULL }, 3317 3318 END_OF_TESTCASES 3319 }; 3320 3321 struct testcase_t signal_testcases[] = { 3322 #ifndef _WIN32 3323 LEGACY(simplestsignal, TT_ISOLATED), 3324 LEGACY(simplesignal, TT_ISOLATED), 3325 LEGACY(multiplesignal, TT_ISOLATED), 3326 LEGACY(immediatesignal, TT_ISOLATED), 3327 LEGACY(signal_dealloc, TT_ISOLATED), 3328 LEGACY(signal_pipeloss, TT_ISOLATED), 3329 LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS), 3330 LEGACY(signal_restore, TT_ISOLATED), 3331 LEGACY(signal_assert, TT_ISOLATED), 3332 LEGACY(signal_while_processing, TT_ISOLATED), 3333 #endif 3334 END_OF_TESTCASES 3335 }; 3336 3337
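/*
 * For reference: the testcase arrays above are meant to be consumed by the
 * tinytest driver.  The sketch below shows one plausible way a runner could
 * wire them up; the group prefixes and the testgroups/tinytest_main() usage
 * are illustrative assumptions (the real driver lives in a separate file,
 * e.g. regress_main.c), not something defined here.
 *
 *	struct testgroup_t testgroups[] = {
 *		{ "main/", main_testcases },
 *		{ "evtag/", evtag_testcases },
 *		{ "signal/", signal_testcases },
 *		END_OF_GROUPS
 *	};
 *
 *	int
 *	main(int argc, const char **argv)
 *	{
 *		return tinytest_main(argc, argv, testgroups);
 *	}
 */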