/*	$OpenBSD: event.c,v 1.25 2010/08/30 07:54:29 nicm Exp $	*/

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
#endif
}

static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	return (evutil_gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
		    base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}
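/*
 * Illustration only (not part of the original file): a minimal sketch of
 * how a caller typically uses the base created above together with the
 * event_set()/event_add()/dispatch functions defined further below.  The
 * callback, descriptor and function names here are hypothetical.
 */
#if 0
static void
example_read_cb(int fd, short events, void *arg)
{
	/* fd became readable; consume data here */
}

static int
example_run(int sock)
{
	struct event_base *base = event_base_new();
	struct event ev;

	event_set(&ev, sock, EV_READ|EV_PERSIST, example_read_cb, NULL);
	event_base_set(base, &ev);	/* attach to this base, not current_base */
	event_add(&ev, NULL);		/* no timeout */

	event_base_dispatch(base);	/* runs until no events remain */

	event_del(&ev);
	event_base_free(base);
	return (0);
}
#endif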
void
event_base_free(struct event_base *base)
{
	int i, n_deleted=0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

#if 0
	/* Right now, reinit always takes effect, since even if the
	   backend doesn't require it, the signal socketpair code does.
	 */
	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);
#endif

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (npriorities == base->nactivequeues)
		return (0);

	if (base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Events with a lower
 * priority number are always processed before those with a higher number,
 * so low-numbered (more urgent) events can starve high-numbered ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}
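/*
 * Illustration only (not part of the original file): a sketch of how an
 * application could use the priority queues described above.  The
 * descriptors (urgent_fd, bulk_fd) and callbacks (urgent_cb, bulk_cb) are
 * hypothetical.
 */
#if 0
static void
example_setup_priorities(struct event_base *base, int urgent_fd, int bulk_fd)
{
	static struct event urgent_ev, bulk_ev;

	event_base_priority_init(base, 2);	/* queue 0 (urgent), queue 1 (bulk) */

	event_set(&urgent_ev, urgent_fd, EV_READ|EV_PERSIST, urgent_cb, NULL);
	event_base_set(base, &urgent_ev);
	event_priority_set(&urgent_ev, 0);	/* processed before priority 1 */
	event_add(&urgent_ev, NULL);

	event_set(&bulk_ev, bulk_fd, EV_READ|EV_PERSIST, bulk_cb, NULL);
	event_base_set(base, &bulk_ev);
	event_priority_set(&bulk_ev, 1);
	event_add(&bulk_ev, NULL);
}
#endif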
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
	    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}
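/*
 * Illustration only (not part of the original file): bounding a dispatch
 * run with event_base_loopexit().  "base" is a hypothetical, already
 * initialized event_base with some events added.
 */
#if 0
static void
example_bounded_run(struct event_base *base)
{
	struct timeval two_seconds = { 2, 0 };

	/* schedule an internal one-shot timeout that sets event_gotterm */
	event_base_loopexit(base, &two_seconds);
	event_base_dispatch(base);	/* returns roughly two seconds later */
}
#endif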
/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update the last known time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not thread safe; the event is scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			evutil_timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}

void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}
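/*
 * Illustration only (not part of the original file): event_base_once() as a
 * simple "call me once in N seconds" timer.  The callback and function names
 * are hypothetical; the underlying struct event_once is allocated and freed
 * by the library itself.
 */
#if 0
static void
example_timer_cb(int fd, short events, void *arg)
{
	/* fd is -1 and events is EV_TIMEOUT for a pure timeout */
}

static void
example_schedule(struct event_base *base)
{
	struct timeval five_seconds = { 5, 0 };

	event_base_once(base, -1, EV_TIMEOUT, example_timer_cb, NULL,
	    &five_seconds);
}
#endif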
/*
 * Sets the priority of an event - if the event is already active,
 * changing the priority will fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);
	}

	return (flags & event);
}

int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
	    "event_add: event: %p, %s%s%scall %p",
	    ev,
	    ev->ev_events & EV_READ ? "EV_READ " : " ",
	    ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
	    tv ? "EV_TIMEOUT " : " ",
	    ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
		    1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		evutil_timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
		    "event_add: timeout in %ld seconds, call %p",
		    tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}

int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
	    ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		return (0);
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using a
 * monotonic clock.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
	    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}

void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
		    ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return ("1.4.14b-stable");
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}