/* $OpenBSD: event.c,v 1.19 2008/05/02 18:26:42 brad Exp $ */

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#include "misc.h"
#endif
#include <sys/types.h>
#include <sys/tree.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_RTSIG
extern const struct eventop rtsigops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_RTSIG
	&rtsigops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};
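/*
 * event_init() below walks this table in order and takes the first
 * backend whose init() succeeds, so the best mechanism compiled in
 * (e.g. kqueue or epoll) is preferred over poll/select.  Setting
 * EVENT_SHOW_METHOD in the environment logs the chosen backend.
 */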
/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

static int
compare(struct event *a, struct event *b)
{
	if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
		return (-1);
	else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
		return (1);
	if (a < b)
		return (-1);
	else if (a > b)
		return (1);
	return (0);
}

static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
#endif
}

static int
gettime(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec ts;

	if (use_monotonic) {
		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	return (gettimeofday(tp, NULL));
}

RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);

RB_GENERATE(event_tree, event, ev_timeout_node, compare);

struct event_base *
event_init(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	detect_monotonic();
	gettime(&base->event_tv);

	RB_INIT(&base->timetree);
	TAILQ_INIT(&base->eventqueue);
	TAILQ_INIT(&base->sig.signalqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
		    base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	current_base = base;
	return (base);
}

void
event_base_free(struct event_base *base)
{
	int i;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	assert(base);
	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(RB_EMPTY(&base->timetree));

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}
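/*
 * Example (illustrative): a caller that wants three priority levels
 * sets them up once after event_init(), then assigns each event a
 * level; lower numbers are dispatched first.  Here fd and cb stand
 * in for the caller's descriptor and callback:
 *
 *	event_base_priority_init(base, 3);
 *	event_set(&ev, fd, EV_READ, cb, NULL);
 *	event_base_set(base, &ev);
 *	event_priority_set(&ev, 0);
 */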
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)calloc(base->nactivequeues,
	    sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priorities are
 * always processed before higher priorities.  Low priority events can
 * starve high priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	if (!base->event_count_active)
		return;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		event_queue_remove(base, ev, EVLIST_ACTIVE);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig)
				return;
		}
	}
}
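/*
 * Note on the loop above: ev->ev_pncalls aliases the local ncalls
 * counter while callbacks run, so event_del() on the event from
 * inside a callback can zero *ev_pncalls and stop any remaining
 * invocations.
 */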
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
	    event_base, tv));
}

/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

#ifndef WIN32
	if (!TAILQ_EMPTY(&base->sig.signalqueue))
		evsignal_base = base;
#endif
	done = 0;
	while (!done) {
		/* Calculate the initial events that we are waiting for */
		if (evsel->recalc(base, evbase, 0) == -1)
			return (-1);

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
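/*
 * Illustrative sketch of driving the loop above (fd and on_read are
 * the caller's; EV_PERSIST keeps the event installed across
 * callbacks):
 *
 *	struct event_base *base = event_init();
 *	struct event ev;
 *	event_set(&ev, fd, EV_READ | EV_PERSIST, on_read, NULL);
 *	event_add(&ev, NULL);
 *	event_base_dispatch(base);
 */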
/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not thread safe; schedules an event on the current base once */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}

void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

/*
 * Sets the priority of an event - if an event is already scheduled
 * changing the priority is going to fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}
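/*
 * An event moves between the queues in this file via its ev_flags:
 * EVLIST_INIT (initialized only), EVLIST_INSERTED (registered for
 * I/O), EVLIST_SIGNAL, EVLIST_TIMEOUT (in the timeout tree) and
 * EVLIST_ACTIVE (ready, waiting in a priority queue).
 * event_pending() below maps those flags back to EV_* result bits.
 */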
/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;
	if (ev->ev_flags & EVLIST_SIGNAL)
		flags |= EV_SIGNAL;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(&now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}

int
event_add(struct event *ev, struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;

	event_debug((
	    "event_add: event: %p, %s%s%scall %p",
	    ev,
	    ev->ev_events & EV_READ ? "EV_READ " : " ",
	    ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
	    tv ? "EV_TIMEOUT " : " ",
	    ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	if (tv != NULL) {
		struct timeval now;

		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(&now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
		    "event_add: timeout in %ld seconds, call %p",
		    (long)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		event_queue_insert(base, ev, EVLIST_INSERTED);

		return (evsel->add(evbase, ev));
	} else if ((ev->ev_events & EV_SIGNAL) &&
	    !(ev->ev_flags & EVLIST_SIGNAL)) {
		event_queue_insert(base, ev, EVLIST_SIGNAL);

		return (evsel->add(evbase, ev));
	}

	return (0);
}
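/*
 * Note on event_add(): passing a timeout for an event that already
 * has one reschedules it - the old deadline is removed from the
 * timeout tree (and a pending EV_TIMEOUT activation is cancelled)
 * before the new deadline is inserted.
 */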
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
	    ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	} else if (ev->ev_flags & EVLIST_SIGNAL) {
		event_queue_remove(base, ev, EVLIST_SIGNAL);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(&now) == -1)
		return (-1);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", (long)tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event *ev;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(tv);
	if (timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
	    __func__));
	timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	RB_FOREACH(ev, event_tree, &base->timetree)
		timersub(&ev->ev_timeout, &off, &ev->ev_timeout);
}
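/*
 * timeout_process() below fires every timeout that has expired by
 * now.  Each event is fully event_del()ed before being activated with
 * EV_TIMEOUT, so timeouts are one-shot here: a callback that wants to
 * run again must event_add() itself anew.
 */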
void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev, *next;

	gettime(&now);

	for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
		if (timercmp(&ev->ev_timeout, &now, >))
			break;
		next = RB_NEXT(event_tree, &base->timetree, ev);

		event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
		    ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_REMOVE(&base->sig.signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT:
		RB_REMOVE(event_tree, &base->timetree, ev);
		break;
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_INSERT_TAIL(&base->sig.signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT: {
		struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
		assert(tmp == NULL);
		break;
	}
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (LIBEVENT_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
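/*
 * Illustrative one-shot timer using the API above (cb is the
 * caller's function): schedules cb to run once after five seconds on
 * the current base, with no descriptor attached.
 *
 *	struct timeval tv = { 5, 0 };
 *	event_once(-1, EV_TIMEOUT, cb, NULL, &tv);
 */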