/*	$NetBSD: event.c,v 1.7 2024/08/18 20:47:21 christos Exp $	*/

/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#ifdef EVENT__HAVE_FCNTL_H
#include <fcntl.h>
#endif

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"


#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};

/* Global state; deprecated */
EVENT2_EXPORT_SYMBOL
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

static void *event_self_cbarg_ptr_ = NULL;

/* Prototypes */
static void	event_queue_insert_active(struct event_base *, struct event_callback *);
static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void	event_queue_insert_timeout(struct event_base *, struct event *);
static void	event_queue_insert_inserted(struct event_base *, struct event *);
static void	event_queue_remove_active(struct event_base *, struct event_callback *);
static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void	event_queue_remove_timeout(struct event_base *, struct event *);
static void	event_queue_remove_inserted(struct event_base *, struct event *);
static void	event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int	event_haveevents(struct event_base *);

static int	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);

static inline void	event_signal_closure(struct event_base *, struct event *ev);
static inline void	event_persist_closure(struct event_base *, struct event *ev);

static int	evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);

#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;


#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that needs
 *        to be shared across threads (if thread support is enabled).
 *
 *        When and if evthreads are initialized, this variable will be evaluated,
 *        and if set to something other than zero, this means the evthread setup
 *        functions were called out of order.
 *
 *        See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)

/* record that ev is now setup (that is, ready for an add) */
static void event_debug_note_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 0;
	} else {
		dent = mm_malloc(sizeof(*dent));
		if (!dent)
			event_err(1,
			    "Out of memory in debugging code");
		dent->ptr = ev;
		dent->added = 0;
		HT_INSERT(event_debug_map, &global_debug_map, dent);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is no longer setup */
static void event_debug_note_teardown_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
	if (dent)
		mm_free(dent);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is now added */
static void event_debug_note_add_(const struct event *ev)
{
	struct event_debug_entry *dent,find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 1;
	} else {
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting an add on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is no longer added */
static void event_debug_note_del_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 0;
	} else {
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting a del on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* assert that ev is setup (i.e., okay to add or inspect) */
static void event_debug_assert_is_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (!dent) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on a non-initialized event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
/* assert that ev is not added (i.e., okay to tear down or set up again) */
static void event_debug_assert_not_added_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent && dent->added) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on an already added event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT", "
		    "flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
{
	if (!event_debug_mode_on_)
		return;
	if (fd < 0)
		return;

#ifndef _WIN32
	{
		int flags;
		if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
			EVUTIL_ASSERT(flags & O_NONBLOCK);
		}
	}
#endif
}
#else
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
#endif

#define EVENT_BASE_ASSERT_LOCKED(base) \
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv,NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}

int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

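/*
 * Illustrative sketch, not part of libevent: a callback can use
 * event_base_gettimeofday_cached() (above) to obtain a wall-clock
 * timestamp cheaply; it falls back to evutil_gettimeofday() only when no
 * cached time is available (for instance outside the loop, or when
 * EVENT_BASE_FLAG_NO_CACHE_TIME is set).  The callback name and argument
 * layout below are hypothetical.
 *
 *	#include <stdio.h>
 *	#include <event2/event.h>
 *
 *	static void
 *	on_read(evutil_socket_t fd, short what, void *arg)
 *	{
 *		struct event_base *base = arg;
 *		struct timeval now;
 *
 *		event_base_gettimeofday_cached(base, &now);
 *		printf("fd %d readable at %ld.%06ld\n", (int)fd,
 *		    (long)now.tv_sec, (long)now.tv_usec);
 *	}
 */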
/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
	    gettime(base, &base->tv_cache);
}

int
event_base_update_cache_time(struct event_base *base)
{

	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}

static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}

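/*
 * Illustrative sketch, not part of libevent: a base with specific
 * requirements is usually built by filling in an event_config and handing
 * it to event_base_new_with_config() (defined below).  Backends can also
 * be vetoed at run time through the EVENT_NO<METHOD> environment variables
 * that event_is_method_disabled() checks, e.g. EVENT_NOEPOLL=1.  The
 * helper name below is hypothetical.
 *
 *	#include <event2/event.h>
 *
 *	struct event_base *
 *	make_edge_triggered_base(void)	// hypothetical helper
 *	{
 *		struct event_base *base = NULL;
 *		struct event_config *cfg = event_config_new();
 *
 *		if (!cfg)
 *			return NULL;
 *		event_config_require_features(cfg, EV_FEATURE_ET);
 *		event_config_avoid_method(cfg, "select");
 *		base = event_base_new_with_config(cfg);
 *		event_config_free(cfg);
 *		return base;	// NULL if no backend satisfies the config
 *	}
 */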
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}

int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases",__func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}

void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);

	event_debug_mode_on_ = 0;
#endif
}

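/*
 * Illustrative sketch, not part of libevent: debug mode must be switched on
 * before the first event or event_base is created (see
 * event_debug_mode_too_late above), so a program typically does it at the
 * very top of main().  The main() below is hypothetical.
 *
 *	#include <event2/event.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct event_base *base;
 *
 *		event_enable_debug_mode();	// must precede any event/base creation
 *
 *		base = event_base_new();
 *		event_base_dispatch(base);	// run the loop as usual
 *		event_base_free(base);
 *		return 0;
 *	}
 */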
struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			if (precise_time) {
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
			}
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;

	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
				eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r<0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}

int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}

static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}

static int event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}

static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted=0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* Running a finalizer can register yet another finalizer, and
		 * if that new finalizer ends up in active_later_queue it can
		 * get moved into activequeues, leaving events in activequeues
		 * after this function returns, which is not what we want
		 * (we even have an assertion for this).
		 *
		 * A simple case is a bufferevent with an underlying
		 * bufferevent (i.e. filters).
		 */
		int i = event_base_free_queues_(base, run_finalizers);
		event_debug(("%s: %d events freed", __func__, i));
		if (!i) {
			break;
		}
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}

/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};
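/*
 * Illustrative sketch, not part of libevent: after fork(), a child process
 * must call event_reinit() (defined below) before using a base inherited
 * from the parent, so that backend state such as an epoll fd or a kqueue is
 * no longer shared.  The helper name and its error handling are
 * hypothetical.
 *
 *	#include <unistd.h>
 *	#include <event2/event.h>
 *
 *	static pid_t
 *	fork_with_base(struct event_base *base)	// hypothetical helper
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid == 0) {
 *			// Child: the backend is still shared with the
 *			// parent until event_reinit() rebuilds it.
 *			if (event_reinit(base) == -1)
 *				_exit(1);
 *		}
 *		return pid;
 *	}
 */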

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process.  For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure.  If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			   "%s: could not reinitialize event mechanism",
			   __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events.  This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}

/* Get the monotonic time for this event_base' timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}

const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char**)methods);

	methods = tmp;

	return (methods);
}

struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}

static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}

int
event_config_set_flag(struct event_config *cfg, int flag)
{
	if (!cfg)
		return -1;
	cfg->flags |= flag;
	return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);

	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
		mm_free(entry);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

	return (0);
}

int
event_config_require_features(struct event_config *cfg,
    int features)
{
	if (!cfg)
		return (-1);
	cfg->require_features = features;
	return (0);
}

int
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
{
	if (!cfg)
		return (-1);
	cfg->n_cpus_hint = cpus;
	return (0);
}

int
event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
{
	if (max_interval)
		memcpy(&cfg->max_dispatch_interval, max_interval,
		    sizeof(struct timeval));
	else
		cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks =
	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
max_callbacks : INT_MAX; 12298585484eSchristos if (min_priority < 0) 12308585484eSchristos min_priority = 0; 12318585484eSchristos cfg->limit_callbacks_after_prio = min_priority; 12328585484eSchristos return (0); 12338585484eSchristos } 12348585484eSchristos 12358585484eSchristos int 12368585484eSchristos event_priority_init(int npriorities) 12378585484eSchristos { 12388585484eSchristos return event_base_priority_init(current_base, npriorities); 12398585484eSchristos } 12408585484eSchristos 12418585484eSchristos int 12428585484eSchristos event_base_priority_init(struct event_base *base, int npriorities) 12438585484eSchristos { 12448585484eSchristos int i, r; 12458585484eSchristos r = -1; 12468585484eSchristos 12478585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 12488585484eSchristos 12498585484eSchristos if (N_ACTIVE_CALLBACKS(base) || npriorities < 1 12508585484eSchristos || npriorities >= EVENT_MAX_PRIORITIES) 12518585484eSchristos goto err; 12528585484eSchristos 12538585484eSchristos if (npriorities == base->nactivequeues) 12548585484eSchristos goto ok; 12558585484eSchristos 12568585484eSchristos if (base->nactivequeues) { 12578585484eSchristos mm_free(base->activequeues); 12588585484eSchristos base->nactivequeues = 0; 12598585484eSchristos } 12608585484eSchristos 12618585484eSchristos /* Allocate our priority queues */ 12628585484eSchristos base->activequeues = (struct evcallback_list *) 12638585484eSchristos mm_calloc(npriorities, sizeof(struct evcallback_list)); 12648585484eSchristos if (base->activequeues == NULL) { 12658585484eSchristos event_warn("%s: calloc", __func__); 12668585484eSchristos goto err; 12678585484eSchristos } 12688585484eSchristos base->nactivequeues = npriorities; 12698585484eSchristos 12708585484eSchristos for (i = 0; i < base->nactivequeues; ++i) { 12718585484eSchristos TAILQ_INIT(&base->activequeues[i]); 12728585484eSchristos } 12738585484eSchristos 12748585484eSchristos ok: 12758585484eSchristos r = 0; 12768585484eSchristos err: 12778585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 12788585484eSchristos return (r); 12798585484eSchristos } 12808585484eSchristos 12818585484eSchristos int 12828585484eSchristos event_base_get_npriorities(struct event_base *base) 12838585484eSchristos { 12848585484eSchristos 12858585484eSchristos int n; 12868585484eSchristos if (base == NULL) 12878585484eSchristos base = current_base; 12888585484eSchristos 12898585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 12908585484eSchristos n = base->nactivequeues; 12918585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 12928585484eSchristos return (n); 12938585484eSchristos } 12948585484eSchristos 1295b8ecfcfeSchristos int 1296b8ecfcfeSchristos event_base_get_num_events(struct event_base *base, unsigned int type) 1297b8ecfcfeSchristos { 1298b8ecfcfeSchristos int r = 0; 1299b8ecfcfeSchristos 1300b8ecfcfeSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 1301b8ecfcfeSchristos 1302b8ecfcfeSchristos if (type & EVENT_BASE_COUNT_ACTIVE) 1303b8ecfcfeSchristos r += base->event_count_active; 1304b8ecfcfeSchristos 1305b8ecfcfeSchristos if (type & EVENT_BASE_COUNT_VIRTUAL) 1306b8ecfcfeSchristos r += base->virtual_event_count; 1307b8ecfcfeSchristos 1308b8ecfcfeSchristos if (type & EVENT_BASE_COUNT_ADDED) 1309b8ecfcfeSchristos r += base->event_count; 1310b8ecfcfeSchristos 1311b8ecfcfeSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 1312b8ecfcfeSchristos 1313b8ecfcfeSchristos return r; 1314b8ecfcfeSchristos } 1315b8ecfcfeSchristos 1316b8ecfcfeSchristos int 
1317b8ecfcfeSchristos event_base_get_max_events(struct event_base *base, unsigned int type, int clear) 1318b8ecfcfeSchristos { 1319b8ecfcfeSchristos int r = 0; 1320b8ecfcfeSchristos 1321b8ecfcfeSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 1322b8ecfcfeSchristos 1323b8ecfcfeSchristos if (type & EVENT_BASE_COUNT_ACTIVE) { 1324b8ecfcfeSchristos r += base->event_count_active_max; 1325b8ecfcfeSchristos if (clear) 1326b8ecfcfeSchristos base->event_count_active_max = 0; 1327b8ecfcfeSchristos } 1328b8ecfcfeSchristos 1329b8ecfcfeSchristos if (type & EVENT_BASE_COUNT_VIRTUAL) { 1330b8ecfcfeSchristos r += base->virtual_event_count_max; 1331b8ecfcfeSchristos if (clear) 1332b8ecfcfeSchristos base->virtual_event_count_max = 0; 1333b8ecfcfeSchristos } 1334b8ecfcfeSchristos 1335b8ecfcfeSchristos if (type & EVENT_BASE_COUNT_ADDED) { 1336b8ecfcfeSchristos r += base->event_count_max; 1337b8ecfcfeSchristos if (clear) 1338b8ecfcfeSchristos base->event_count_max = 0; 1339b8ecfcfeSchristos } 1340b8ecfcfeSchristos 1341b8ecfcfeSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 1342b8ecfcfeSchristos 1343b8ecfcfeSchristos return r; 1344b8ecfcfeSchristos } 1345b8ecfcfeSchristos 13468585484eSchristos /* Returns true iff we're currently watching any events. */ 13478585484eSchristos static int 13488585484eSchristos event_haveevents(struct event_base *base) 13498585484eSchristos { 13508585484eSchristos /* Caller must hold th_base_lock */ 13518585484eSchristos return (base->virtual_event_count > 0 || base->event_count > 0); 13528585484eSchristos } 13538585484eSchristos 13548585484eSchristos /* "closure" function called when processing active signal events */ 13558585484eSchristos static inline void 13568585484eSchristos event_signal_closure(struct event_base *base, struct event *ev) 13578585484eSchristos { 13588585484eSchristos short ncalls; 13598585484eSchristos int should_break; 13608585484eSchristos 13618585484eSchristos /* Allows deletes to work */ 13628585484eSchristos ncalls = ev->ev_ncalls; 13638585484eSchristos if (ncalls != 0) 13648585484eSchristos ev->ev_pncalls = &ncalls; 13658585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 13668585484eSchristos while (ncalls) { 13678585484eSchristos ncalls--; 13688585484eSchristos ev->ev_ncalls = ncalls; 13698585484eSchristos if (ncalls == 0) 13708585484eSchristos ev->ev_pncalls = NULL; 13718585484eSchristos (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg); 13728585484eSchristos 13738585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 13748585484eSchristos should_break = base->event_break; 13758585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 13768585484eSchristos 13778585484eSchristos if (should_break) { 13788585484eSchristos if (ncalls != 0) 13798585484eSchristos ev->ev_pncalls = NULL; 13808585484eSchristos return; 13818585484eSchristos } 13828585484eSchristos } 13838585484eSchristos } 13848585484eSchristos 13858585484eSchristos /* Common timeouts are special timeouts that are handled as queues rather than 13868585484eSchristos * in the minheap. This is more efficient than the minheap if we happen to 13878585484eSchristos * know that we're going to get several thousands of timeout events all with 13888585484eSchristos * the same timeout value. 13898585484eSchristos * 13908585484eSchristos * Since all our timeout handling code assumes timevals can be copied, 13918585484eSchristos * assigned, etc, we can't use "magic pointer" to encode these common 13928585484eSchristos * timeouts. 
Searching through a list to see if every timeout is common could 13938585484eSchristos * also get inefficient. Instead, we take advantage of the fact that tv_usec 13948585484eSchristos * is 32 bits long, but only uses 20 of those bits (since it can never be over 13958585484eSchristos * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits 13968585484eSchristos * of index into the event_base's array of common timeouts. 13978585484eSchristos */ 13988585484eSchristos 13998585484eSchristos #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK 14008585484eSchristos #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000 14018585484eSchristos #define COMMON_TIMEOUT_IDX_SHIFT 20 14028585484eSchristos #define COMMON_TIMEOUT_MASK 0xf0000000 14038585484eSchristos #define COMMON_TIMEOUT_MAGIC 0x50000000 14048585484eSchristos 14058585484eSchristos #define COMMON_TIMEOUT_IDX(tv) \ 14068585484eSchristos (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT) 14078585484eSchristos 14088585484eSchristos /** Return true iff 'tv' is a common timeout in 'base' */ 14098585484eSchristos static inline int 14108585484eSchristos is_common_timeout(const struct timeval *tv, 14118585484eSchristos const struct event_base *base) 14128585484eSchristos { 14138585484eSchristos int idx; 14148585484eSchristos if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC) 14158585484eSchristos return 0; 14168585484eSchristos idx = COMMON_TIMEOUT_IDX(tv); 14178585484eSchristos return idx < base->n_common_timeouts; 14188585484eSchristos } 14198585484eSchristos 14208585484eSchristos /* True iff tv1 and tv2 have the same common-timeout index, or if neither 14218585484eSchristos * one is a common timeout. */ 14228585484eSchristos static inline int 14238585484eSchristos is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2) 14248585484eSchristos { 14258585484eSchristos return (tv1->tv_usec & ~MICROSECONDS_MASK) == 14268585484eSchristos (tv2->tv_usec & ~MICROSECONDS_MASK); 14278585484eSchristos } 14288585484eSchristos 14298585484eSchristos /** Requires that 'tv' is a common timeout. Return the corresponding 14308585484eSchristos * common_timeout_list. */ 14318585484eSchristos static inline struct common_timeout_list * 14328585484eSchristos get_common_timeout_list(struct event_base *base, const struct timeval *tv) 14338585484eSchristos { 14348585484eSchristos return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)]; 14358585484eSchristos } 14368585484eSchristos 14378585484eSchristos #if 0 14388585484eSchristos static inline int 14398585484eSchristos common_timeout_ok(const struct timeval *tv, 14408585484eSchristos struct event_base *base) 14418585484eSchristos { 14428585484eSchristos const struct timeval *expect = 14438585484eSchristos &get_common_timeout_list(base, tv)->duration; 14448585484eSchristos return tv->tv_sec == expect->tv_sec && 14458585484eSchristos tv->tv_usec == expect->tv_usec; 14468585484eSchristos } 14478585484eSchristos #endif 14488585484eSchristos 14498585484eSchristos /* Add the timeout for the first event in the given common timeout list to the 14508585484eSchristos * event_base's minheap.
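 * (Illustrative example, not part of the original comment: with
 * COMMON_TIMEOUT_MAGIC 0x50000000, index 3 and a duration of 2.5 seconds, the
 * encoded tv_usec is 0x50000000 | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 500000;
 * masking with MICROSECONDS_MASK below recovers the plain 500000 before the
 * queue's timeout event is inserted into the minheap.)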
*/ 14518585484eSchristos static void 14528585484eSchristos common_timeout_schedule(struct common_timeout_list *ctl, 14538585484eSchristos const struct timeval *now, struct event *head) 14548585484eSchristos { 14558585484eSchristos struct timeval timeout = head->ev_timeout; 14568585484eSchristos timeout.tv_usec &= MICROSECONDS_MASK; 14578585484eSchristos event_add_nolock_(&ctl->timeout_event, &timeout, 1); 14588585484eSchristos } 14598585484eSchristos 14608585484eSchristos /* Callback: invoked when the timeout for a common timeout queue triggers. 14618585484eSchristos * This means that (at least) the first event in that queue should be run, 14628585484eSchristos * and the timeout should be rescheduled if there are more events. */ 14638585484eSchristos static void 14648585484eSchristos common_timeout_callback(evutil_socket_t fd, short what, void *arg) 14658585484eSchristos { 14668585484eSchristos struct timeval now; 14678585484eSchristos struct common_timeout_list *ctl = arg; 14688585484eSchristos struct event_base *base = ctl->base; 14698585484eSchristos struct event *ev = NULL; 14708585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 14718585484eSchristos gettime(base, &now); 14728585484eSchristos while (1) { 14738585484eSchristos ev = TAILQ_FIRST(&ctl->events); 14748585484eSchristos if (!ev || ev->ev_timeout.tv_sec > now.tv_sec || 14758585484eSchristos (ev->ev_timeout.tv_sec == now.tv_sec && 14768585484eSchristos (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec)) 14778585484eSchristos break; 1478b8ecfcfeSchristos event_del_nolock_(ev, EVENT_DEL_NOBLOCK); 14798585484eSchristos event_active_nolock_(ev, EV_TIMEOUT, 1); 14808585484eSchristos } 14818585484eSchristos if (ev) 14828585484eSchristos common_timeout_schedule(ctl, &now, ev); 14838585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 14848585484eSchristos } 14858585484eSchristos 14868585484eSchristos #define MAX_COMMON_TIMEOUTS 256 14878585484eSchristos 14888585484eSchristos const struct timeval * 14898585484eSchristos event_base_init_common_timeout(struct event_base *base, 14908585484eSchristos const struct timeval *duration) 14918585484eSchristos { 14928585484eSchristos int i; 14938585484eSchristos struct timeval tv; 14948585484eSchristos const struct timeval *result=NULL; 14958585484eSchristos struct common_timeout_list *new_ctl; 14968585484eSchristos 14978585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 14988585484eSchristos if (duration->tv_usec > 1000000) { 14998585484eSchristos memcpy(&tv, duration, sizeof(struct timeval)); 15008585484eSchristos if (is_common_timeout(duration, base)) 15018585484eSchristos tv.tv_usec &= MICROSECONDS_MASK; 15028585484eSchristos tv.tv_sec += tv.tv_usec / 1000000; 15038585484eSchristos tv.tv_usec %= 1000000; 15048585484eSchristos duration = &tv; 15058585484eSchristos } 15068585484eSchristos for (i = 0; i < base->n_common_timeouts; ++i) { 15078585484eSchristos const struct common_timeout_list *ctl = 15088585484eSchristos base->common_timeout_queues[i]; 15098585484eSchristos if (duration->tv_sec == ctl->duration.tv_sec && 15108585484eSchristos duration->tv_usec == 15118585484eSchristos (ctl->duration.tv_usec & MICROSECONDS_MASK)) { 15128585484eSchristos EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base)); 15138585484eSchristos result = &ctl->duration; 15148585484eSchristos goto done; 15158585484eSchristos } 15168585484eSchristos } 15178585484eSchristos if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) { 15188585484eSchristos event_warnx("%s: Too many common timeouts 
already in use; " 15198585484eSchristos "we only support %d per event_base", __func__, 15208585484eSchristos MAX_COMMON_TIMEOUTS); 15218585484eSchristos goto done; 15228585484eSchristos } 15238585484eSchristos if (base->n_common_timeouts_allocated == base->n_common_timeouts) { 15248585484eSchristos int n = base->n_common_timeouts < 16 ? 16 : 15258585484eSchristos base->n_common_timeouts*2; 15268585484eSchristos struct common_timeout_list **newqueues = 15278585484eSchristos mm_realloc(base->common_timeout_queues, 15288585484eSchristos n*sizeof(struct common_timeout_queue *)); 15298585484eSchristos if (!newqueues) { 15308585484eSchristos event_warn("%s: realloc",__func__); 15318585484eSchristos goto done; 15328585484eSchristos } 15338585484eSchristos base->n_common_timeouts_allocated = n; 15348585484eSchristos base->common_timeout_queues = newqueues; 15358585484eSchristos } 15368585484eSchristos new_ctl = mm_calloc(1, sizeof(struct common_timeout_list)); 15378585484eSchristos if (!new_ctl) { 15388585484eSchristos event_warn("%s: calloc",__func__); 15398585484eSchristos goto done; 15408585484eSchristos } 15418585484eSchristos TAILQ_INIT(&new_ctl->events); 15428585484eSchristos new_ctl->duration.tv_sec = duration->tv_sec; 15438585484eSchristos new_ctl->duration.tv_usec = 15448585484eSchristos duration->tv_usec | COMMON_TIMEOUT_MAGIC | 15458585484eSchristos (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT); 15468585484eSchristos evtimer_assign(&new_ctl->timeout_event, base, 15478585484eSchristos common_timeout_callback, new_ctl); 15488585484eSchristos new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL; 15498585484eSchristos event_priority_set(&new_ctl->timeout_event, 0); 15508585484eSchristos new_ctl->base = base; 15518585484eSchristos base->common_timeout_queues[base->n_common_timeouts++] = new_ctl; 15528585484eSchristos result = &new_ctl->duration; 15538585484eSchristos 15548585484eSchristos done: 15558585484eSchristos if (result) 15568585484eSchristos EVUTIL_ASSERT(is_common_timeout(result, base)); 15578585484eSchristos 15588585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 15598585484eSchristos return result; 15608585484eSchristos } 15618585484eSchristos 15628585484eSchristos /* Closure function invoked when we're activating a persistent event. */ 15638585484eSchristos static inline void 15648585484eSchristos event_persist_closure(struct event_base *base, struct event *ev) 15658585484eSchristos { 1566b8ecfcfeSchristos void (*evcb_callback)(evutil_socket_t, short, void *); 1567b8ecfcfeSchristos 15687476e6e4Schristos // Other fields of *ev that must be stored before executing 15697476e6e4Schristos evutil_socket_t evcb_fd; 15707476e6e4Schristos short evcb_res; 15717476e6e4Schristos void *evcb_arg; 15727476e6e4Schristos 15738585484eSchristos /* reschedule the persistent event if we have a timeout. */ 15748585484eSchristos if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) { 15758585484eSchristos /* If there was a timeout, we want it to run at an interval of 15768585484eSchristos * ev_io_timeout after the last time it was _scheduled_ for, 15778585484eSchristos * not ev_io_timeout after _now_. If it fired for another 15788585484eSchristos * reason, though, the timeout ought to start ticking _now_. 
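 * (Illustrative example, not part of the original comment: a 5-second
 * persistent timer scheduled to expire at t=10 that actually fires at
 * t=10.2 is rescheduled for t=15; if it fired at t=10.2 because of EV_READ
 * instead, the next timeout is t=15.2.)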
*/ 15798585484eSchristos struct timeval run_at, relative_to, delay, now; 15808585484eSchristos ev_uint32_t usec_mask = 0; 15818585484eSchristos EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout, 15828585484eSchristos &ev->ev_io_timeout)); 15838585484eSchristos gettime(base, &now); 15848585484eSchristos if (is_common_timeout(&ev->ev_timeout, base)) { 15858585484eSchristos delay = ev->ev_io_timeout; 15868585484eSchristos usec_mask = delay.tv_usec & ~MICROSECONDS_MASK; 15878585484eSchristos delay.tv_usec &= MICROSECONDS_MASK; 15888585484eSchristos if (ev->ev_res & EV_TIMEOUT) { 15898585484eSchristos relative_to = ev->ev_timeout; 15908585484eSchristos relative_to.tv_usec &= MICROSECONDS_MASK; 15918585484eSchristos } else { 15928585484eSchristos relative_to = now; 15938585484eSchristos } 15948585484eSchristos } else { 15958585484eSchristos delay = ev->ev_io_timeout; 15968585484eSchristos if (ev->ev_res & EV_TIMEOUT) { 15978585484eSchristos relative_to = ev->ev_timeout; 15988585484eSchristos } else { 15998585484eSchristos relative_to = now; 16008585484eSchristos } 16018585484eSchristos } 16028585484eSchristos evutil_timeradd(&relative_to, &delay, &run_at); 16038585484eSchristos if (evutil_timercmp(&run_at, &now, <)) { 16048585484eSchristos /* Looks like we missed at least one invocation due to 16058585484eSchristos * a clock jump, not running the event loop for a 16068585484eSchristos * while, really slow callbacks, or 16078585484eSchristos * something. Reschedule relative to now. 16088585484eSchristos */ 16098585484eSchristos evutil_timeradd(&now, &delay, &run_at); 16108585484eSchristos } 16118585484eSchristos run_at.tv_usec |= usec_mask; 16128585484eSchristos event_add_nolock_(ev, &run_at, 1); 16138585484eSchristos } 1614b8ecfcfeSchristos 1615b8ecfcfeSchristos // Save our callback before we release the lock 16167476e6e4Schristos evcb_callback = ev->ev_callback; 16177476e6e4Schristos evcb_fd = ev->ev_fd; 16187476e6e4Schristos evcb_res = ev->ev_res; 16197476e6e4Schristos evcb_arg = ev->ev_arg; 1620b8ecfcfeSchristos 1621b8ecfcfeSchristos // Release the lock 16228585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 1623b8ecfcfeSchristos 1624b8ecfcfeSchristos // Execute the callback 16257476e6e4Schristos (evcb_callback)(evcb_fd, evcb_res, evcb_arg); 16268585484eSchristos } 16278585484eSchristos 16288585484eSchristos /* 16298585484eSchristos Helper for event_process_active to process all the events in a single queue, 16308585484eSchristos releasing the lock as we go. This function requires that the lock be held 16318585484eSchristos when it's invoked. Returns -1 if we get a signal or an event_break that 16328585484eSchristos means we should stop processing any active events now. Otherwise returns 16338585484eSchristos the number of non-internal event_callbacks that we processed. 
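  Processing also stops early once max_to_process callbacks have run, once
  endtime (if any) has passed, or once event_continue is set; those checks
  happen after each callback returns.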
16348585484eSchristos */ 16358585484eSchristos static int 16368585484eSchristos event_process_active_single_queue(struct event_base *base, 16378585484eSchristos struct evcallback_list *activeq, 16388585484eSchristos int max_to_process, const struct timeval *endtime) 16398585484eSchristos { 16408585484eSchristos struct event_callback *evcb; 16418585484eSchristos int count = 0; 16428585484eSchristos 16438585484eSchristos EVUTIL_ASSERT(activeq != NULL); 16448585484eSchristos 16458585484eSchristos for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) { 16468585484eSchristos struct event *ev=NULL; 16478585484eSchristos if (evcb->evcb_flags & EVLIST_INIT) { 16488585484eSchristos ev = event_callback_to_event(evcb); 16498585484eSchristos 1650b8ecfcfeSchristos if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING) 16518585484eSchristos event_queue_remove_active(base, evcb); 16528585484eSchristos else 1653b8ecfcfeSchristos event_del_nolock_(ev, EVENT_DEL_NOBLOCK); 16548585484eSchristos event_debug(( 1655b8ecfcfeSchristos "event_process_active: event: %p, %s%s%scall %p", 16568585484eSchristos ev, 16578585484eSchristos ev->ev_res & EV_READ ? "EV_READ " : " ", 16588585484eSchristos ev->ev_res & EV_WRITE ? "EV_WRITE " : " ", 1659b8ecfcfeSchristos ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ", 16608585484eSchristos ev->ev_callback)); 16618585484eSchristos } else { 16628585484eSchristos event_queue_remove_active(base, evcb); 16638585484eSchristos event_debug(("event_process_active: event_callback %p, " 16648585484eSchristos "closure %d, call %p", 16658585484eSchristos evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback)); 16668585484eSchristos } 16678585484eSchristos 16688585484eSchristos if (!(evcb->evcb_flags & EVLIST_INTERNAL)) 16698585484eSchristos ++count; 16708585484eSchristos 16718585484eSchristos 16728585484eSchristos base->current_event = evcb; 16738585484eSchristos #ifndef EVENT__DISABLE_THREAD_SUPPORT 16748585484eSchristos base->current_event_waiters = 0; 16758585484eSchristos #endif 16768585484eSchristos 16778585484eSchristos switch (evcb->evcb_closure) { 16788585484eSchristos case EV_CLOSURE_EVENT_SIGNAL: 1679b8ecfcfeSchristos EVUTIL_ASSERT(ev != NULL); 16808585484eSchristos event_signal_closure(base, ev); 16818585484eSchristos break; 16828585484eSchristos case EV_CLOSURE_EVENT_PERSIST: 1683b8ecfcfeSchristos EVUTIL_ASSERT(ev != NULL); 16848585484eSchristos event_persist_closure(base, ev); 16858585484eSchristos break; 1686b8ecfcfeSchristos case EV_CLOSURE_EVENT: { 16877476e6e4Schristos void (*evcb_callback)(evutil_socket_t, short, void *); 1688*eabc0478Schristos short res; 1689b8ecfcfeSchristos EVUTIL_ASSERT(ev != NULL); 16907476e6e4Schristos evcb_callback = *ev->ev_callback; 1691*eabc0478Schristos res = ev->ev_res; 16928585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 1693*eabc0478Schristos evcb_callback(ev->ev_fd, res, ev->ev_arg); 1694b8ecfcfeSchristos } 16958585484eSchristos break; 1696b8ecfcfeSchristos case EV_CLOSURE_CB_SELF: { 1697b8ecfcfeSchristos void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb; 16988585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 1699b8ecfcfeSchristos evcb_selfcb(evcb, evcb->evcb_arg); 1700b8ecfcfeSchristos } 1701b8ecfcfeSchristos break; 1702b8ecfcfeSchristos case EV_CLOSURE_EVENT_FINALIZE: 1703b8ecfcfeSchristos case EV_CLOSURE_EVENT_FINALIZE_FREE: { 17047476e6e4Schristos void (*evcb_evfinalize)(struct event *, void *); 17057476e6e4Schristos int evcb_closure = 
evcb->evcb_closure; 1706b8ecfcfeSchristos EVUTIL_ASSERT(ev != NULL); 1707b8ecfcfeSchristos base->current_event = NULL; 17087476e6e4Schristos evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize; 1709b8ecfcfeSchristos EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); 1710b8ecfcfeSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 1711b8ecfcfeSchristos event_debug_note_teardown_(ev); 1712*eabc0478Schristos evcb_evfinalize(ev, ev->ev_arg); 17137476e6e4Schristos if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE) 1714b8ecfcfeSchristos mm_free(ev); 1715b8ecfcfeSchristos } 1716b8ecfcfeSchristos break; 1717b8ecfcfeSchristos case EV_CLOSURE_CB_FINALIZE: { 1718b8ecfcfeSchristos void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize; 1719b8ecfcfeSchristos base->current_event = NULL; 1720b8ecfcfeSchristos EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); 1721b8ecfcfeSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 1722b8ecfcfeSchristos evcb_cbfinalize(evcb, evcb->evcb_arg); 1723b8ecfcfeSchristos } 17248585484eSchristos break; 17258585484eSchristos default: 17268585484eSchristos EVUTIL_ASSERT(0); 17278585484eSchristos } 17288585484eSchristos 17298585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 17308585484eSchristos base->current_event = NULL; 17318585484eSchristos #ifndef EVENT__DISABLE_THREAD_SUPPORT 17328585484eSchristos if (base->current_event_waiters) { 17338585484eSchristos base->current_event_waiters = 0; 17348585484eSchristos EVTHREAD_COND_BROADCAST(base->current_event_cond); 17358585484eSchristos } 17368585484eSchristos #endif 17378585484eSchristos 17388585484eSchristos if (base->event_break) 17398585484eSchristos return -1; 17408585484eSchristos if (count >= max_to_process) 17418585484eSchristos return count; 17428585484eSchristos if (count && endtime) { 17438585484eSchristos struct timeval now; 17448585484eSchristos update_time_cache(base); 17458585484eSchristos gettime(base, &now); 17468585484eSchristos if (evutil_timercmp(&now, endtime, >=)) 17478585484eSchristos return count; 17488585484eSchristos } 17498585484eSchristos if (base->event_continue) 17508585484eSchristos break; 17518585484eSchristos } 17528585484eSchristos return count; 17538585484eSchristos } 17548585484eSchristos 17558585484eSchristos /* 17568585484eSchristos * Active events are stored in priority queues. Lower priorities are always 17578585484eSchristos * processed before higher priorities. Low priority events can starve high 17588585484eSchristos * priority ones.
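 * (For example, with event_base_priority_init(base, 3), a steady stream of
 * priority-0 events can keep the priority-2 queue from ever being serviced,
 * since event_process_active() stops after the first queue in which it ran a
 * non-internal callback.)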
17598585484eSchristos */ 17608585484eSchristos 17618585484eSchristos static int 17628585484eSchristos event_process_active(struct event_base *base) 17638585484eSchristos { 17648585484eSchristos /* Caller must hold th_base_lock */ 17658585484eSchristos struct evcallback_list *activeq = NULL; 17668585484eSchristos int i, c = 0; 17678585484eSchristos const struct timeval *endtime; 17688585484eSchristos struct timeval tv; 17698585484eSchristos const int maxcb = base->max_dispatch_callbacks; 17708585484eSchristos const int limit_after_prio = base->limit_callbacks_after_prio; 17718585484eSchristos if (base->max_dispatch_time.tv_sec >= 0) { 17728585484eSchristos update_time_cache(base); 17738585484eSchristos gettime(base, &tv); 17748585484eSchristos evutil_timeradd(&base->max_dispatch_time, &tv, &tv); 17758585484eSchristos endtime = &tv; 17768585484eSchristos } else { 17778585484eSchristos endtime = NULL; 17788585484eSchristos } 17798585484eSchristos 17808585484eSchristos for (i = 0; i < base->nactivequeues; ++i) { 17818585484eSchristos if (TAILQ_FIRST(&base->activequeues[i]) != NULL) { 17828585484eSchristos base->event_running_priority = i; 17838585484eSchristos activeq = &base->activequeues[i]; 17848585484eSchristos if (i < limit_after_prio) 17858585484eSchristos c = event_process_active_single_queue(base, activeq, 17868585484eSchristos INT_MAX, NULL); 17878585484eSchristos else 17888585484eSchristos c = event_process_active_single_queue(base, activeq, 17898585484eSchristos maxcb, endtime); 17908585484eSchristos if (c < 0) { 17918585484eSchristos goto done; 17928585484eSchristos } else if (c > 0) 17938585484eSchristos break; /* Processed a real event; do not 17948585484eSchristos * consider lower-priority events */ 17958585484eSchristos /* If we get here, all of the events we processed 17968585484eSchristos * were internal. Continue. */ 17978585484eSchristos } 17988585484eSchristos } 17998585484eSchristos 18008585484eSchristos done: 18018585484eSchristos base->event_running_priority = -1; 18028585484eSchristos 18038585484eSchristos return c; 18048585484eSchristos } 18058585484eSchristos 18068585484eSchristos /* 18078585484eSchristos * Wait continuously for events. We exit only if no events are left. 18088585484eSchristos */ 18098585484eSchristos 18108585484eSchristos int 18118585484eSchristos event_dispatch(void) 18128585484eSchristos { 18138585484eSchristos return (event_loop(0)); 18148585484eSchristos } 18158585484eSchristos 18168585484eSchristos int 18178585484eSchristos event_base_dispatch(struct event_base *event_base) 18188585484eSchristos { 18198585484eSchristos return (event_base_loop(event_base, 0)); 18208585484eSchristos } 18218585484eSchristos 18228585484eSchristos const char * 18238585484eSchristos event_base_get_method(const struct event_base *base) 18248585484eSchristos { 18258585484eSchristos EVUTIL_ASSERT(base); 18268585484eSchristos return (base->evsel->name); 18278585484eSchristos } 18288585484eSchristos 18298585484eSchristos /** Callback: used to implement event_base_loopexit by telling the event_base 18308585484eSchristos * that it's time to exit its loop. 
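 * The event_gotterm flag it sets is only checked at the top of each
 * event_base_loop() iteration, so callbacks that are already active still run
 * before the loop returns.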
*/ 18318585484eSchristos static void 18328585484eSchristos event_loopexit_cb(evutil_socket_t fd, short what, void *arg) 18338585484eSchristos { 18348585484eSchristos struct event_base *base = arg; 18358585484eSchristos base->event_gotterm = 1; 18368585484eSchristos } 18378585484eSchristos 18388585484eSchristos int 18398585484eSchristos event_loopexit(const struct timeval *tv) 18408585484eSchristos { 18418585484eSchristos return (event_once(-1, EV_TIMEOUT, event_loopexit_cb, 18428585484eSchristos current_base, tv)); 18438585484eSchristos } 18448585484eSchristos 18458585484eSchristos int 18468585484eSchristos event_base_loopexit(struct event_base *event_base, const struct timeval *tv) 18478585484eSchristos { 18488585484eSchristos return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb, 18498585484eSchristos event_base, tv)); 18508585484eSchristos } 18518585484eSchristos 18528585484eSchristos int 18538585484eSchristos event_loopbreak(void) 18548585484eSchristos { 18558585484eSchristos return (event_base_loopbreak(current_base)); 18568585484eSchristos } 18578585484eSchristos 18588585484eSchristos int 18598585484eSchristos event_base_loopbreak(struct event_base *event_base) 18608585484eSchristos { 18618585484eSchristos int r = 0; 18628585484eSchristos if (event_base == NULL) 18638585484eSchristos return (-1); 18648585484eSchristos 18658585484eSchristos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); 18668585484eSchristos event_base->event_break = 1; 18678585484eSchristos 18688585484eSchristos if (EVBASE_NEED_NOTIFY(event_base)) { 18698585484eSchristos r = evthread_notify_base(event_base); 18708585484eSchristos } else { 18718585484eSchristos r = (0); 18728585484eSchristos } 18738585484eSchristos EVBASE_RELEASE_LOCK(event_base, th_base_lock); 18748585484eSchristos return r; 18758585484eSchristos } 18768585484eSchristos 18778585484eSchristos int 18788585484eSchristos event_base_loopcontinue(struct event_base *event_base) 18798585484eSchristos { 18808585484eSchristos int r = 0; 18818585484eSchristos if (event_base == NULL) 18828585484eSchristos return (-1); 18838585484eSchristos 18848585484eSchristos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); 18858585484eSchristos event_base->event_continue = 1; 18868585484eSchristos 18878585484eSchristos if (EVBASE_NEED_NOTIFY(event_base)) { 18888585484eSchristos r = evthread_notify_base(event_base); 18898585484eSchristos } else { 18908585484eSchristos r = (0); 18918585484eSchristos } 18928585484eSchristos EVBASE_RELEASE_LOCK(event_base, th_base_lock); 18938585484eSchristos return r; 18948585484eSchristos } 18958585484eSchristos 18968585484eSchristos int 18978585484eSchristos event_base_got_break(struct event_base *event_base) 18988585484eSchristos { 18998585484eSchristos int res; 19008585484eSchristos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); 19018585484eSchristos res = event_base->event_break; 19028585484eSchristos EVBASE_RELEASE_LOCK(event_base, th_base_lock); 19038585484eSchristos return res; 19048585484eSchristos } 19058585484eSchristos 19068585484eSchristos int 19078585484eSchristos event_base_got_exit(struct event_base *event_base) 19088585484eSchristos { 19098585484eSchristos int res; 19108585484eSchristos EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); 19118585484eSchristos res = event_base->event_gotterm; 19128585484eSchristos EVBASE_RELEASE_LOCK(event_base, th_base_lock); 19138585484eSchristos return res; 19148585484eSchristos } 19158585484eSchristos 19168585484eSchristos /* not thread safe */ 19178585484eSchristos 
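/* (The note above refers to event_loop()'s use of the global current_base;
 * event_base_loop() below takes an explicit base instead.) */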
19188585484eSchristos int 19198585484eSchristos event_loop(int flags) 19208585484eSchristos { 19218585484eSchristos return event_base_loop(current_base, flags); 19228585484eSchristos } 19238585484eSchristos 19248585484eSchristos int 19258585484eSchristos event_base_loop(struct event_base *base, int flags) 19268585484eSchristos { 19278585484eSchristos const struct eventop *evsel = base->evsel; 19288585484eSchristos struct timeval tv; 19298585484eSchristos struct timeval *tv_p; 19308585484eSchristos int res, done, retval = 0; 19318585484eSchristos 19328585484eSchristos /* Grab the lock. We will release it inside evsel.dispatch, and again 19338585484eSchristos * as we invoke user callbacks. */ 19348585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 19358585484eSchristos 19368585484eSchristos if (base->running_loop) { 19378585484eSchristos event_warnx("%s: reentrant invocation. Only one event_base_loop" 19388585484eSchristos " can run on each event_base at once.", __func__); 19398585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 19408585484eSchristos return -1; 19418585484eSchristos } 19428585484eSchristos 19438585484eSchristos base->running_loop = 1; 19448585484eSchristos 19458585484eSchristos clear_time_cache(base); 19468585484eSchristos 19478585484eSchristos if (base->sig.ev_signal_added && base->sig.ev_n_signals_added) 19488585484eSchristos evsig_set_base_(base); 19498585484eSchristos 19508585484eSchristos done = 0; 19518585484eSchristos 19528585484eSchristos #ifndef EVENT__DISABLE_THREAD_SUPPORT 19538585484eSchristos base->th_owner_id = EVTHREAD_GET_ID(); 19548585484eSchristos #endif 19558585484eSchristos 19568585484eSchristos base->event_gotterm = base->event_break = 0; 19578585484eSchristos 19588585484eSchristos while (!done) { 19598585484eSchristos base->event_continue = 0; 19608585484eSchristos base->n_deferreds_queued = 0; 19618585484eSchristos 19628585484eSchristos /* Terminate the loop if we have been asked to */ 19638585484eSchristos if (base->event_gotterm) { 19648585484eSchristos break; 19658585484eSchristos } 19668585484eSchristos 19678585484eSchristos if (base->event_break) { 19688585484eSchristos break; 19698585484eSchristos } 19708585484eSchristos 19718585484eSchristos tv_p = &tv; 19728585484eSchristos if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) { 19738585484eSchristos timeout_next(base, &tv_p); 19748585484eSchristos } else { 19758585484eSchristos /* 19768585484eSchristos * if we have active events, we just poll new events 19778585484eSchristos * without waiting. 
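 * (The timeout handed to evsel->dispatch below is cleared to zero in
 * that case.)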
19788585484eSchristos */ 19798585484eSchristos evutil_timerclear(&tv); 19808585484eSchristos } 19818585484eSchristos 19828585484eSchristos /* If we have no events, we just exit */ 19838585484eSchristos if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) && 19848585484eSchristos !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) { 19858585484eSchristos event_debug(("%s: no events registered.", __func__)); 19868585484eSchristos retval = 1; 19878585484eSchristos goto done; 19888585484eSchristos } 19898585484eSchristos 19908585484eSchristos event_queue_make_later_events_active(base); 19918585484eSchristos 19928585484eSchristos clear_time_cache(base); 19938585484eSchristos 19948585484eSchristos res = evsel->dispatch(base, tv_p); 19958585484eSchristos 19968585484eSchristos if (res == -1) { 19978585484eSchristos event_debug(("%s: dispatch returned unsuccessfully.", 19988585484eSchristos __func__)); 19998585484eSchristos retval = -1; 20008585484eSchristos goto done; 20018585484eSchristos } 20028585484eSchristos 20038585484eSchristos update_time_cache(base); 20048585484eSchristos 20058585484eSchristos timeout_process(base); 20068585484eSchristos 20078585484eSchristos if (N_ACTIVE_CALLBACKS(base)) { 20088585484eSchristos int n = event_process_active(base); 20098585484eSchristos if ((flags & EVLOOP_ONCE) 20108585484eSchristos && N_ACTIVE_CALLBACKS(base) == 0 20118585484eSchristos && n != 0) 20128585484eSchristos done = 1; 20138585484eSchristos } else if (flags & EVLOOP_NONBLOCK) 20148585484eSchristos done = 1; 20158585484eSchristos } 20168585484eSchristos event_debug(("%s: asked to terminate loop.", __func__)); 20178585484eSchristos 20188585484eSchristos done: 20198585484eSchristos clear_time_cache(base); 20208585484eSchristos base->running_loop = 0; 20218585484eSchristos 20228585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 20238585484eSchristos 20248585484eSchristos return (retval); 20258585484eSchristos } 20268585484eSchristos 20278585484eSchristos /* One-time callback to implement event_base_once: invokes the user callback, 20288585484eSchristos * then deletes the allocated storage */ 20298585484eSchristos static void 20308585484eSchristos event_once_cb(evutil_socket_t fd, short events, void *arg) 20318585484eSchristos { 20328585484eSchristos struct event_once *eonce = arg; 20338585484eSchristos 20348585484eSchristos (*eonce->cb)(fd, events, eonce->arg); 20358585484eSchristos EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock); 20368585484eSchristos LIST_REMOVE(eonce, next_once); 20378585484eSchristos EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock); 20388585484eSchristos event_debug_unassign(&eonce->ev); 20398585484eSchristos mm_free(eonce); 20408585484eSchristos } 20418585484eSchristos 20428585484eSchristos /* not threadsafe, event scheduled once. 
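 * (Illustrative use, not from the original source, with a hypothetical
 * callback on_readable and context pointer ctx:
 *     struct timeval tv = {5, 0};
 *     event_once(sock, EV_READ, on_readable, ctx, &tv);
 * runs on_readable at most once, when sock becomes readable or after five
 * seconds, whichever comes first.)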
*/ 20438585484eSchristos int 20448585484eSchristos event_once(evutil_socket_t fd, short events, 20458585484eSchristos void (*callback)(evutil_socket_t, short, void *), 20468585484eSchristos void *arg, const struct timeval *tv) 20478585484eSchristos { 20488585484eSchristos return event_base_once(current_base, fd, events, callback, arg, tv); 20498585484eSchristos } 20508585484eSchristos 20518585484eSchristos /* Schedules an event once */ 20528585484eSchristos int 20538585484eSchristos event_base_once(struct event_base *base, evutil_socket_t fd, short events, 20548585484eSchristos void (*callback)(evutil_socket_t, short, void *), 20558585484eSchristos void *arg, const struct timeval *tv) 20568585484eSchristos { 20578585484eSchristos struct event_once *eonce; 20588585484eSchristos int res = 0; 20598585484eSchristos int activate = 0; 20608585484eSchristos 2061*eabc0478Schristos if (!base) 2062*eabc0478Schristos return (-1); 2063*eabc0478Schristos 20648585484eSchristos /* We cannot support signals that just fire once, or persistent 20658585484eSchristos * events. */ 20668585484eSchristos if (events & (EV_SIGNAL|EV_PERSIST)) 20678585484eSchristos return (-1); 20688585484eSchristos 20698585484eSchristos if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL) 20708585484eSchristos return (-1); 20718585484eSchristos 20728585484eSchristos eonce->cb = callback; 20738585484eSchristos eonce->arg = arg; 20748585484eSchristos 2075b8ecfcfeSchristos if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) { 20768585484eSchristos evtimer_assign(&eonce->ev, base, event_once_cb, eonce); 20778585484eSchristos 20788585484eSchristos if (tv == NULL || ! evutil_timerisset(tv)) { 20798585484eSchristos /* If the event is going to become active immediately, 20808585484eSchristos * don't put it on the timeout queue. This is one 20818585484eSchristos * idiom for scheduling a callback, so let's make 20828585484eSchristos * it fast (and order-preserving). 
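 * (The idiom in question is a call like
 *     event_base_once(base, -1, EV_TIMEOUT, cb, arg, NULL);
 * with cb/arg standing in for any callback and argument, which asks for cb
 * to be run from the event loop as soon as possible.)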
*/ 20838585484eSchristos activate = 1; 20848585484eSchristos } 2085b8ecfcfeSchristos } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) { 2086b8ecfcfeSchristos events &= EV_READ|EV_WRITE|EV_CLOSED; 20878585484eSchristos 20888585484eSchristos event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce); 20898585484eSchristos } else { 20908585484eSchristos /* Bad event combination */ 20918585484eSchristos mm_free(eonce); 20928585484eSchristos return (-1); 20938585484eSchristos } 20948585484eSchristos 20958585484eSchristos if (res == 0) { 20968585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 20978585484eSchristos if (activate) 20988585484eSchristos event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1); 20998585484eSchristos else 21008585484eSchristos res = event_add_nolock_(&eonce->ev, tv, 0); 21018585484eSchristos 21028585484eSchristos if (res != 0) { 21038585484eSchristos mm_free(eonce); 21048585484eSchristos return (res); 21058585484eSchristos } else { 21068585484eSchristos LIST_INSERT_HEAD(&base->once_events, eonce, next_once); 21078585484eSchristos } 21088585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 21098585484eSchristos } 21108585484eSchristos 21118585484eSchristos return (0); 21128585484eSchristos } 21138585484eSchristos 21148585484eSchristos int 21158585484eSchristos event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg) 21168585484eSchristos { 21178585484eSchristos if (!base) 21188585484eSchristos base = current_base; 21198585484eSchristos if (arg == &event_self_cbarg_ptr_) 21208585484eSchristos arg = ev; 21218585484eSchristos 2122*eabc0478Schristos if (!(events & EV_SIGNAL)) 2123*eabc0478Schristos event_debug_assert_socket_nonblocking_(fd); 21248585484eSchristos event_debug_assert_not_added_(ev); 21258585484eSchristos 21268585484eSchristos ev->ev_base = base; 21278585484eSchristos 21288585484eSchristos ev->ev_callback = callback; 21298585484eSchristos ev->ev_arg = arg; 21308585484eSchristos ev->ev_fd = fd; 21318585484eSchristos ev->ev_events = events; 21328585484eSchristos ev->ev_res = 0; 21338585484eSchristos ev->ev_flags = EVLIST_INIT; 21348585484eSchristos ev->ev_ncalls = 0; 21358585484eSchristos ev->ev_pncalls = NULL; 21368585484eSchristos 21378585484eSchristos if (events & EV_SIGNAL) { 2138b8ecfcfeSchristos if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) { 21398585484eSchristos event_warnx("%s: EV_SIGNAL is not compatible with " 2140b8ecfcfeSchristos "EV_READ, EV_WRITE or EV_CLOSED", __func__); 21418585484eSchristos return -1; 21428585484eSchristos } 21438585484eSchristos ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL; 21448585484eSchristos } else { 21458585484eSchristos if (events & EV_PERSIST) { 21468585484eSchristos evutil_timerclear(&ev->ev_io_timeout); 21478585484eSchristos ev->ev_closure = EV_CLOSURE_EVENT_PERSIST; 21488585484eSchristos } else { 21498585484eSchristos ev->ev_closure = EV_CLOSURE_EVENT; 21508585484eSchristos } 21518585484eSchristos } 21528585484eSchristos 21538585484eSchristos min_heap_elem_init_(ev); 21548585484eSchristos 21558585484eSchristos if (base != NULL) { 21568585484eSchristos /* by default, we put new events into the middle priority */ 21578585484eSchristos ev->ev_pri = base->nactivequeues / 2; 21588585484eSchristos } 21598585484eSchristos 21608585484eSchristos event_debug_note_setup_(ev); 21618585484eSchristos 21628585484eSchristos return 0; 21638585484eSchristos } 21648585484eSchristos 21658585484eSchristos int 21668585484eSchristos 
event_base_set(struct event_base *base, struct event *ev) 21678585484eSchristos { 21688585484eSchristos /* Only innocent events may be assigned to a different base */ 21698585484eSchristos if (ev->ev_flags != EVLIST_INIT) 21708585484eSchristos return (-1); 21718585484eSchristos 21728585484eSchristos event_debug_assert_is_setup_(ev); 21738585484eSchristos 21748585484eSchristos ev->ev_base = base; 21758585484eSchristos ev->ev_pri = base->nactivequeues/2; 21768585484eSchristos 21778585484eSchristos return (0); 21788585484eSchristos } 21798585484eSchristos 21808585484eSchristos void 21818585484eSchristos event_set(struct event *ev, evutil_socket_t fd, short events, 21828585484eSchristos void (*callback)(evutil_socket_t, short, void *), void *arg) 21838585484eSchristos { 21848585484eSchristos int r; 21858585484eSchristos r = event_assign(ev, current_base, fd, events, callback, arg); 21868585484eSchristos EVUTIL_ASSERT(r == 0); 21878585484eSchristos } 21888585484eSchristos 21898585484eSchristos void * 21908585484eSchristos event_self_cbarg(void) 21918585484eSchristos { 21928585484eSchristos return &event_self_cbarg_ptr_; 21938585484eSchristos } 21948585484eSchristos 21958585484eSchristos struct event * 21968585484eSchristos event_base_get_running_event(struct event_base *base) 21978585484eSchristos { 21988585484eSchristos struct event *ev = NULL; 21998585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 22008585484eSchristos if (EVBASE_IN_THREAD(base)) { 22018585484eSchristos struct event_callback *evcb = base->current_event; 22028585484eSchristos if (evcb->evcb_flags & EVLIST_INIT) 22038585484eSchristos ev = event_callback_to_event(evcb); 22048585484eSchristos } 22058585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 22068585484eSchristos return ev; 22078585484eSchristos } 22088585484eSchristos 22098585484eSchristos struct event * 22108585484eSchristos event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg) 22118585484eSchristos { 22128585484eSchristos struct event *ev; 22138585484eSchristos ev = mm_malloc(sizeof(struct event)); 22148585484eSchristos if (ev == NULL) 22158585484eSchristos return (NULL); 22168585484eSchristos if (event_assign(ev, base, fd, events, cb, arg) < 0) { 22178585484eSchristos mm_free(ev); 22188585484eSchristos return (NULL); 22198585484eSchristos } 22208585484eSchristos 22218585484eSchristos return (ev); 22228585484eSchristos } 22238585484eSchristos 22248585484eSchristos void 22258585484eSchristos event_free(struct event *ev) 22268585484eSchristos { 2227b8ecfcfeSchristos /* This is disabled, so that events which have been finalized remain a 2228b8ecfcfeSchristos * valid target for event_free(). */ 2229b8ecfcfeSchristos // event_debug_assert_is_setup_(ev); 22308585484eSchristos 22318585484eSchristos /* make sure that this event won't be coming back to haunt us.
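 * (That is, event_del() below removes it from every queue it is still on
 * before the memory is released.)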
*/ 22328585484eSchristos event_del(ev); 22338585484eSchristos event_debug_note_teardown_(ev); 22348585484eSchristos mm_free(ev); 22358585484eSchristos 22368585484eSchristos } 22378585484eSchristos 22388585484eSchristos void 22398585484eSchristos event_debug_unassign(struct event *ev) 22408585484eSchristos { 22418585484eSchristos event_debug_assert_not_added_(ev); 22428585484eSchristos event_debug_note_teardown_(ev); 22438585484eSchristos 22448585484eSchristos ev->ev_flags &= ~EVLIST_INIT; 22458585484eSchristos } 22468585484eSchristos 2247b8ecfcfeSchristos #define EVENT_FINALIZE_FREE_ 0x10000 2248b8ecfcfeSchristos static int 2249b8ecfcfeSchristos event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb) 2250b8ecfcfeSchristos { 2251b8ecfcfeSchristos ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ? 2252b8ecfcfeSchristos EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE; 2253b8ecfcfeSchristos 2254b8ecfcfeSchristos event_del_nolock_(ev, EVENT_DEL_NOBLOCK); 2255b8ecfcfeSchristos ev->ev_closure = closure; 2256b8ecfcfeSchristos ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb; 2257b8ecfcfeSchristos event_active_nolock_(ev, EV_FINALIZE, 1); 2258b8ecfcfeSchristos ev->ev_flags |= EVLIST_FINALIZING; 2259b8ecfcfeSchristos return 0; 2260b8ecfcfeSchristos } 2261b8ecfcfeSchristos 2262b8ecfcfeSchristos static int 2263b8ecfcfeSchristos event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb) 2264b8ecfcfeSchristos { 2265b8ecfcfeSchristos int r; 2266b8ecfcfeSchristos struct event_base *base = ev->ev_base; 2267b8ecfcfeSchristos if (EVUTIL_FAILURE_CHECK(!base)) { 2268b8ecfcfeSchristos event_warnx("%s: event has no event_base set.", __func__); 2269b8ecfcfeSchristos return -1; 2270b8ecfcfeSchristos } 2271b8ecfcfeSchristos 2272b8ecfcfeSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 2273b8ecfcfeSchristos r = event_finalize_nolock_(base, flags, ev, cb); 2274b8ecfcfeSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 2275b8ecfcfeSchristos return r; 2276b8ecfcfeSchristos } 2277b8ecfcfeSchristos 2278b8ecfcfeSchristos int 2279b8ecfcfeSchristos event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb) 2280b8ecfcfeSchristos { 2281b8ecfcfeSchristos return event_finalize_impl_(flags, ev, cb); 2282b8ecfcfeSchristos } 2283b8ecfcfeSchristos 2284b8ecfcfeSchristos int 2285b8ecfcfeSchristos event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb) 2286b8ecfcfeSchristos { 2287b8ecfcfeSchristos return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb); 2288b8ecfcfeSchristos } 2289b8ecfcfeSchristos 2290b8ecfcfeSchristos void 2291b8ecfcfeSchristos event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *)) 2292b8ecfcfeSchristos { 2293b8ecfcfeSchristos struct event *ev = NULL; 2294b8ecfcfeSchristos if (evcb->evcb_flags & EVLIST_INIT) { 2295b8ecfcfeSchristos ev = event_callback_to_event(evcb); 2296b8ecfcfeSchristos event_del_nolock_(ev, EVENT_DEL_NOBLOCK); 2297b8ecfcfeSchristos } else { 2298b8ecfcfeSchristos event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/ 2299b8ecfcfeSchristos } 2300b8ecfcfeSchristos 2301b8ecfcfeSchristos evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE; 2302b8ecfcfeSchristos evcb->evcb_cb_union.evcb_cbfinalize = cb; 2303b8ecfcfeSchristos event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/ 2304b8ecfcfeSchristos evcb->evcb_flags |= 
EVLIST_FINALIZING; 2305b8ecfcfeSchristos } 2306b8ecfcfeSchristos 2307b8ecfcfeSchristos void 2308b8ecfcfeSchristos event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *)) 2309b8ecfcfeSchristos { 2310b8ecfcfeSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 2311b8ecfcfeSchristos event_callback_finalize_nolock_(base, flags, evcb, cb); 2312b8ecfcfeSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 2313b8ecfcfeSchristos } 2314b8ecfcfeSchristos 2315b8ecfcfeSchristos /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided 2316b8ecfcfeSchristos * callback will be invoked on *one of them*, after they have *all* been 2317b8ecfcfeSchristos * finalized. */ 2318b8ecfcfeSchristos int 2319b8ecfcfeSchristos event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *)) 2320b8ecfcfeSchristos { 2321b8ecfcfeSchristos int n_pending = 0, i; 2322b8ecfcfeSchristos 2323b8ecfcfeSchristos if (base == NULL) 2324b8ecfcfeSchristos base = current_base; 2325b8ecfcfeSchristos 2326b8ecfcfeSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 2327b8ecfcfeSchristos 2328b8ecfcfeSchristos event_debug(("%s: %d events finalizing", __func__, n_cbs)); 2329b8ecfcfeSchristos 2330b8ecfcfeSchristos /* At most one can be currently executing; the rest we just 2331b8ecfcfeSchristos * cancel... But we always make sure that the finalize callback 2332b8ecfcfeSchristos * runs. */ 2333b8ecfcfeSchristos for (i = 0; i < n_cbs; ++i) { 2334b8ecfcfeSchristos struct event_callback *evcb = evcbs[i]; 2335b8ecfcfeSchristos if (evcb == base->current_event) { 2336b8ecfcfeSchristos event_callback_finalize_nolock_(base, 0, evcb, cb); 2337b8ecfcfeSchristos ++n_pending; 2338b8ecfcfeSchristos } else { 2339b8ecfcfeSchristos event_callback_cancel_nolock_(base, evcb, 0); 2340b8ecfcfeSchristos } 2341b8ecfcfeSchristos } 2342b8ecfcfeSchristos 2343b8ecfcfeSchristos if (n_pending == 0) { 2344b8ecfcfeSchristos /* Just do the first one. */ 2345b8ecfcfeSchristos event_callback_finalize_nolock_(base, 0, evcbs[0], cb); 2346b8ecfcfeSchristos } 2347b8ecfcfeSchristos 2348b8ecfcfeSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 2349b8ecfcfeSchristos return 0; 2350b8ecfcfeSchristos } 2351b8ecfcfeSchristos 23528585484eSchristos /* 23538585484eSchristos * Sets the priority of an event - if an event is already scheduled 23548585484eSchristos * changing the priority is going to fail. 23558585484eSchristos */ 23568585484eSchristos 23578585484eSchristos int 23588585484eSchristos event_priority_set(struct event *ev, int pri) 23598585484eSchristos { 23608585484eSchristos event_debug_assert_is_setup_(ev); 23618585484eSchristos 23628585484eSchristos if (ev->ev_flags & EVLIST_ACTIVE) 23638585484eSchristos return (-1); 23648585484eSchristos if (pri < 0 || pri >= ev->ev_base->nactivequeues) 23658585484eSchristos return (-1); 23668585484eSchristos 23678585484eSchristos ev->ev_pri = pri; 23688585484eSchristos 23698585484eSchristos return (0); 23708585484eSchristos } 23718585484eSchristos 23728585484eSchristos /* 23738585484eSchristos * Checks if a specific event is pending or scheduled.
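 * (Illustrative use, not from the original source:
 *     struct timeval expiry;
 *     if (event_pending(ev, EV_READ | EV_TIMEOUT, &expiry))
 *         ...;
 * the return value is the subset of the requested flags that are pending, and
 * expiry is filled in with the timeout's wall-clock expiration when
 * EV_TIMEOUT is both requested and pending.)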
23748585484eSchristos */ 23758585484eSchristos 23768585484eSchristos int 23778585484eSchristos event_pending(const struct event *ev, short event, struct timeval *tv) 23788585484eSchristos { 23798585484eSchristos int flags = 0; 23808585484eSchristos 23818585484eSchristos if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) { 23828585484eSchristos event_warnx("%s: event has no event_base set.", __func__); 23838585484eSchristos return 0; 23848585484eSchristos } 23858585484eSchristos 23868585484eSchristos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); 23878585484eSchristos event_debug_assert_is_setup_(ev); 23888585484eSchristos 23898585484eSchristos if (ev->ev_flags & EVLIST_INSERTED) 2390b8ecfcfeSchristos flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)); 23918585484eSchristos if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) 23928585484eSchristos flags |= ev->ev_res; 23938585484eSchristos if (ev->ev_flags & EVLIST_TIMEOUT) 23948585484eSchristos flags |= EV_TIMEOUT; 23958585484eSchristos 2396b8ecfcfeSchristos event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL); 23978585484eSchristos 23988585484eSchristos /* See if there is a timeout that we should report */ 23998585484eSchristos if (tv != NULL && (flags & event & EV_TIMEOUT)) { 24008585484eSchristos struct timeval tmp = ev->ev_timeout; 24018585484eSchristos tmp.tv_usec &= MICROSECONDS_MASK; 24028585484eSchristos /* correctly remap to real time */ 24038585484eSchristos evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv); 24048585484eSchristos } 24058585484eSchristos 24068585484eSchristos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); 24078585484eSchristos 24088585484eSchristos return (flags & event); 24098585484eSchristos } 24108585484eSchristos 24118585484eSchristos int 24128585484eSchristos event_initialized(const struct event *ev) 24138585484eSchristos { 24148585484eSchristos if (!(ev->ev_flags & EVLIST_INIT)) 24158585484eSchristos return 0; 24168585484eSchristos 24178585484eSchristos return 1; 24188585484eSchristos } 24198585484eSchristos 24208585484eSchristos void 24218585484eSchristos event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out) 24228585484eSchristos { 24238585484eSchristos event_debug_assert_is_setup_(event); 24248585484eSchristos 24258585484eSchristos if (base_out) 24268585484eSchristos *base_out = event->ev_base; 24278585484eSchristos if (fd_out) 24288585484eSchristos *fd_out = event->ev_fd; 24298585484eSchristos if (events_out) 24308585484eSchristos *events_out = event->ev_events; 24318585484eSchristos if (callback_out) 24328585484eSchristos *callback_out = event->ev_callback; 24338585484eSchristos if (arg_out) 24348585484eSchristos *arg_out = event->ev_arg; 24358585484eSchristos } 24368585484eSchristos 24378585484eSchristos size_t 24388585484eSchristos event_get_struct_event_size(void) 24398585484eSchristos { 24408585484eSchristos return sizeof(struct event); 24418585484eSchristos } 24428585484eSchristos 24438585484eSchristos evutil_socket_t 24448585484eSchristos event_get_fd(const struct event *ev) 24458585484eSchristos { 24468585484eSchristos event_debug_assert_is_setup_(ev); 24478585484eSchristos return ev->ev_fd; 24488585484eSchristos } 24498585484eSchristos 24508585484eSchristos struct event_base * 24518585484eSchristos event_get_base(const struct event *ev) 24528585484eSchristos { 24538585484eSchristos event_debug_assert_is_setup_(ev); 24548585484eSchristos return
ev->ev_base; 24558585484eSchristos } 24568585484eSchristos 24578585484eSchristos short 24588585484eSchristos event_get_events(const struct event *ev) 24598585484eSchristos { 24608585484eSchristos event_debug_assert_is_setup_(ev); 24618585484eSchristos return ev->ev_events; 24628585484eSchristos } 24638585484eSchristos 24648585484eSchristos event_callback_fn 24658585484eSchristos event_get_callback(const struct event *ev) 24668585484eSchristos { 24678585484eSchristos event_debug_assert_is_setup_(ev); 24688585484eSchristos return ev->ev_callback; 24698585484eSchristos } 24708585484eSchristos 24718585484eSchristos void * 24728585484eSchristos event_get_callback_arg(const struct event *ev) 24738585484eSchristos { 24748585484eSchristos event_debug_assert_is_setup_(ev); 24758585484eSchristos return ev->ev_arg; 24768585484eSchristos } 24778585484eSchristos 24788585484eSchristos int 24798585484eSchristos event_get_priority(const struct event *ev) 24808585484eSchristos { 24818585484eSchristos event_debug_assert_is_setup_(ev); 24828585484eSchristos return ev->ev_pri; 24838585484eSchristos } 24848585484eSchristos 24858585484eSchristos int 24868585484eSchristos event_add(struct event *ev, const struct timeval *tv) 24878585484eSchristos { 24888585484eSchristos int res; 24898585484eSchristos 24908585484eSchristos if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { 24918585484eSchristos event_warnx("%s: event has no event_base set.", __func__); 24928585484eSchristos return -1; 24938585484eSchristos } 24948585484eSchristos 24958585484eSchristos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); 24968585484eSchristos 24978585484eSchristos res = event_add_nolock_(ev, tv, 0); 24988585484eSchristos 24998585484eSchristos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); 25008585484eSchristos 25018585484eSchristos return (res); 25028585484eSchristos } 25038585484eSchristos 25048585484eSchristos /* Helper callback: wake an event_base from another thread. This version 25058585484eSchristos * works by writing a byte to one end of a socketpair, so that the event_base 25068585484eSchristos * listening on the other end will wake up as the corresponding event 25078585484eSchristos * triggers */ 25088585484eSchristos static int 25098585484eSchristos evthread_notify_base_default(struct event_base *base) 25108585484eSchristos { 25118585484eSchristos char buf[1]; 25128585484eSchristos int r; 25138585484eSchristos buf[0] = (char) 0; 25148585484eSchristos #ifdef _WIN32 25158585484eSchristos r = send(base->th_notify_fd[1], buf, 1, 0); 25168585484eSchristos #else 25178585484eSchristos r = write(base->th_notify_fd[1], buf, 1); 25188585484eSchristos #endif 25198585484eSchristos return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0; 25208585484eSchristos } 25218585484eSchristos 25228585484eSchristos #ifdef EVENT__HAVE_EVENTFD 25238585484eSchristos /* Helper callback: wake an event_base from another thread. This version 25248585484eSchristos * assumes that you have a working eventfd() implementation. */ 25258585484eSchristos static int 25268585484eSchristos evthread_notify_base_eventfd(struct event_base *base) 25278585484eSchristos { 25288585484eSchristos ev_uint64_t msg = 1; 25298585484eSchristos int r; 25308585484eSchristos do { 25318585484eSchristos r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg)); 25328585484eSchristos } while (r < 0 && errno == EAGAIN); 25338585484eSchristos 25348585484eSchristos return (r < 0) ? 
-1 : 0; 25358585484eSchristos } 25368585484eSchristos #endif 25378585484eSchristos 25388585484eSchristos 25398585484eSchristos /** Tell the thread currently running the event_loop for base (if any) that it 25408585484eSchristos * needs to stop waiting in its dispatch function (if it is) and process all 25418585484eSchristos * active callbacks. */ 25428585484eSchristos static int 25438585484eSchristos evthread_notify_base(struct event_base *base) 25448585484eSchristos { 25458585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 25468585484eSchristos if (!base->th_notify_fn) 25478585484eSchristos return -1; 25488585484eSchristos if (base->is_notify_pending) 25498585484eSchristos return 0; 25508585484eSchristos base->is_notify_pending = 1; 25518585484eSchristos return base->th_notify_fn(base); 25528585484eSchristos } 25538585484eSchristos 25548585484eSchristos /* Implementation function to remove a timeout on a currently pending event. 25558585484eSchristos */ 25568585484eSchristos int 25578585484eSchristos event_remove_timer_nolock_(struct event *ev) 25588585484eSchristos { 25598585484eSchristos struct event_base *base = ev->ev_base; 25608585484eSchristos 25618585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 25628585484eSchristos event_debug_assert_is_setup_(ev); 25638585484eSchristos 25648585484eSchristos event_debug(("event_remove_timer_nolock: event: %p", ev)); 25658585484eSchristos 25668585484eSchristos /* If it's not pending on a timeout, we don't need to do anything. */ 25678585484eSchristos if (ev->ev_flags & EVLIST_TIMEOUT) { 25688585484eSchristos event_queue_remove_timeout(base, ev); 25698585484eSchristos evutil_timerclear(&ev->ev_.ev_io.ev_timeout); 25708585484eSchristos } 25718585484eSchristos 25728585484eSchristos return (0); 25738585484eSchristos } 25748585484eSchristos 25758585484eSchristos int 25768585484eSchristos event_remove_timer(struct event *ev) 25778585484eSchristos { 25788585484eSchristos int res; 25798585484eSchristos 25808585484eSchristos if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { 25818585484eSchristos event_warnx("%s: event has no event_base set.", __func__); 25828585484eSchristos return -1; 25838585484eSchristos } 25848585484eSchristos 25858585484eSchristos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); 25868585484eSchristos 25878585484eSchristos res = event_remove_timer_nolock_(ev); 25888585484eSchristos 25898585484eSchristos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); 25908585484eSchristos 25918585484eSchristos return (res); 25928585484eSchristos } 25938585484eSchristos 25948585484eSchristos /* Implementation function to add an event. Works just like event_add, 25958585484eSchristos * except: 1) it requires that we have the lock. 
2) if tv_is_absolute is set, 25968585484eSchristos * we treat tv as an absolute time, not as an interval to add to the current 25978585484eSchristos * time */ 25988585484eSchristos int 25998585484eSchristos event_add_nolock_(struct event *ev, const struct timeval *tv, 26008585484eSchristos int tv_is_absolute) 26018585484eSchristos { 26028585484eSchristos struct event_base *base = ev->ev_base; 26038585484eSchristos int res = 0; 26048585484eSchristos int notify = 0; 26058585484eSchristos 26068585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 26078585484eSchristos event_debug_assert_is_setup_(ev); 26088585484eSchristos 26098585484eSchristos event_debug(( 2610b8ecfcfeSchristos "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p", 26118585484eSchristos ev, 26128585484eSchristos EV_SOCK_ARG(ev->ev_fd), 26138585484eSchristos ev->ev_events & EV_READ ? "EV_READ " : " ", 26148585484eSchristos ev->ev_events & EV_WRITE ? "EV_WRITE " : " ", 2615b8ecfcfeSchristos ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ", 26168585484eSchristos tv ? "EV_TIMEOUT " : " ", 26178585484eSchristos ev->ev_callback)); 26188585484eSchristos 26198585484eSchristos EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL)); 26208585484eSchristos 2621b8ecfcfeSchristos if (ev->ev_flags & EVLIST_FINALIZING) { 2622b8ecfcfeSchristos /* XXXX debug */ 2623b8ecfcfeSchristos return (-1); 2624b8ecfcfeSchristos } 2625b8ecfcfeSchristos 26268585484eSchristos /* 26278585484eSchristos * prepare for timeout insertion further below, if we get a 26288585484eSchristos * failure on any step, we should not change any state. 26298585484eSchristos */ 26308585484eSchristos if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) { 26318585484eSchristos if (min_heap_reserve_(&base->timeheap, 26328585484eSchristos 1 + min_heap_size_(&base->timeheap)) == -1) 26338585484eSchristos return (-1); /* ENOMEM == errno */ 26348585484eSchristos } 26358585484eSchristos 26368585484eSchristos /* If the main thread is currently executing a signal event's 26378585484eSchristos * callback, and we are not the main thread, then we want to wait 26388585484eSchristos * until the callback is done before we mess with the event, or else 26398585484eSchristos * we can race on ev_ncalls and ev_pncalls below. */ 26408585484eSchristos #ifndef EVENT__DISABLE_THREAD_SUPPORT 26418585484eSchristos if (base->current_event == event_to_event_callback(ev) && 26428585484eSchristos (ev->ev_events & EV_SIGNAL) 26438585484eSchristos && !EVBASE_IN_THREAD(base)) { 26448585484eSchristos ++base->current_event_waiters; 26458585484eSchristos EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); 26468585484eSchristos } 26478585484eSchristos #endif 26488585484eSchristos 2649b8ecfcfeSchristos if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) && 26508585484eSchristos !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { 2651b8ecfcfeSchristos if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)) 26528585484eSchristos res = evmap_io_add_(base, ev->ev_fd, ev); 26538585484eSchristos else if (ev->ev_events & EV_SIGNAL) 26548585484eSchristos res = evmap_signal_add_(base, (int)ev->ev_fd, ev); 26558585484eSchristos if (res != -1) 26568585484eSchristos event_queue_insert_inserted(base, ev); 26578585484eSchristos if (res == 1) { 26588585484eSchristos /* evmap says we need to notify the main thread. 
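 * (Editor's note: evmap returns 1 when this add actually changed the set
 * of fds/signals the backend is watching; another thread may be asleep in
 * the backend's dispatch call with the old set, so it has to be woken up
 * to re-poll and pick up the new event.)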
*/ 26598585484eSchristos notify = 1; 26608585484eSchristos res = 0; 26618585484eSchristos } 26628585484eSchristos } 26638585484eSchristos 26648585484eSchristos /* 26658585484eSchristos * we should change the timeout state only if the previous event 26668585484eSchristos * addition succeeded. 26678585484eSchristos */ 26688585484eSchristos if (res != -1 && tv != NULL) { 26698585484eSchristos struct timeval now; 26708585484eSchristos int common_timeout; 26718585484eSchristos #ifdef USE_REINSERT_TIMEOUT 26728585484eSchristos int was_common; 26738585484eSchristos int old_timeout_idx; 26748585484eSchristos #endif 26758585484eSchristos 26768585484eSchristos /* 26778585484eSchristos * for persistent timeout events, we remember the 26788585484eSchristos * timeout value and re-add the event. 26798585484eSchristos * 26808585484eSchristos * If tv_is_absolute, this was already set. 26818585484eSchristos */ 26828585484eSchristos if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute) 26838585484eSchristos ev->ev_io_timeout = *tv; 26848585484eSchristos 26858585484eSchristos #ifndef USE_REINSERT_TIMEOUT 26868585484eSchristos if (ev->ev_flags & EVLIST_TIMEOUT) { 26878585484eSchristos event_queue_remove_timeout(base, ev); 26888585484eSchristos } 26898585484eSchristos #endif 26908585484eSchristos 26918585484eSchristos /* Check if it is active due to a timeout. Rescheduling 26928585484eSchristos * this timeout before the callback can be executed 26938585484eSchristos * removes it from the active list. */ 26948585484eSchristos if ((ev->ev_flags & EVLIST_ACTIVE) && 26958585484eSchristos (ev->ev_res & EV_TIMEOUT)) { 26968585484eSchristos if (ev->ev_events & EV_SIGNAL) { 26978585484eSchristos /* See if we are just active executing 26988585484eSchristos * this event in a loop 26998585484eSchristos */ 27008585484eSchristos if (ev->ev_ncalls && ev->ev_pncalls) { 27018585484eSchristos /* Abort loop */ 27028585484eSchristos *ev->ev_pncalls = 0; 27038585484eSchristos } 27048585484eSchristos } 27058585484eSchristos 27068585484eSchristos event_queue_remove_active(base, event_to_event_callback(ev)); 27078585484eSchristos } 27088585484eSchristos 27098585484eSchristos gettime(base, &now); 27108585484eSchristos 27118585484eSchristos common_timeout = is_common_timeout(tv, base); 27128585484eSchristos #ifdef USE_REINSERT_TIMEOUT 27138585484eSchristos was_common = is_common_timeout(&ev->ev_timeout, base); 27148585484eSchristos old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout); 27158585484eSchristos #endif 27168585484eSchristos 27178585484eSchristos if (tv_is_absolute) { 27188585484eSchristos ev->ev_timeout = *tv; 27198585484eSchristos } else if (common_timeout) { 27208585484eSchristos struct timeval tmp = *tv; 27218585484eSchristos tmp.tv_usec &= MICROSECONDS_MASK; 27228585484eSchristos evutil_timeradd(&now, &tmp, &ev->ev_timeout); 27238585484eSchristos ev->ev_timeout.tv_usec |= 27248585484eSchristos (tv->tv_usec & ~MICROSECONDS_MASK); 27258585484eSchristos } else { 27268585484eSchristos evutil_timeradd(&now, tv, &ev->ev_timeout); 27278585484eSchristos } 27288585484eSchristos 27298585484eSchristos event_debug(( 27308585484eSchristos "event_add: event %p, timeout in %d seconds %d useconds, call %p", 27318585484eSchristos ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback)); 27328585484eSchristos 27338585484eSchristos #ifdef USE_REINSERT_TIMEOUT 27348585484eSchristos event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx); 27358585484eSchristos #else 27368585484eSchristos 
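/* (Editor's note: in this non-reinsert path the event was already removed
 * from its old timeout queue above if it had one, so a plain insert into
 * the min-heap or common-timeout list is all that is needed here.) */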
event_queue_insert_timeout(base, ev); 27378585484eSchristos #endif 27388585484eSchristos 27398585484eSchristos if (common_timeout) { 27408585484eSchristos struct common_timeout_list *ctl = 27418585484eSchristos get_common_timeout_list(base, &ev->ev_timeout); 27428585484eSchristos if (ev == TAILQ_FIRST(&ctl->events)) { 27438585484eSchristos common_timeout_schedule(ctl, &now, ev); 27448585484eSchristos } 27458585484eSchristos } else { 27468585484eSchristos struct event* top = NULL; 27478585484eSchristos /* See if the earliest timeout is now earlier than it 27488585484eSchristos * was before: if so, we will need to tell the main 27498585484eSchristos * thread to wake up earlier than it would otherwise. 27508585484eSchristos * We double check the timeout of the top element to 27518585484eSchristos * handle time distortions due to system suspension. 27528585484eSchristos */ 27538585484eSchristos if (min_heap_elt_is_top_(ev)) 27548585484eSchristos notify = 1; 27558585484eSchristos else if ((top = min_heap_top_(&base->timeheap)) != NULL && 27568585484eSchristos evutil_timercmp(&top->ev_timeout, &now, <)) 27578585484eSchristos notify = 1; 27588585484eSchristos } 27598585484eSchristos } 27608585484eSchristos 27618585484eSchristos /* if we are not in the right thread, we need to wake up the loop */ 27628585484eSchristos if (res != -1 && notify && EVBASE_NEED_NOTIFY(base)) 27638585484eSchristos evthread_notify_base(base); 27648585484eSchristos 27658585484eSchristos event_debug_note_add_(ev); 27668585484eSchristos 27678585484eSchristos return (res); 27688585484eSchristos } 27698585484eSchristos 2770b8ecfcfeSchristos static int 2771b8ecfcfeSchristos event_del_(struct event *ev, int blocking) 27728585484eSchristos { 27738585484eSchristos int res; 2774*eabc0478Schristos struct event_base *base = ev->ev_base; 27758585484eSchristos 2776*eabc0478Schristos if (EVUTIL_FAILURE_CHECK(!base)) { 27778585484eSchristos event_warnx("%s: event has no event_base set.", __func__); 27788585484eSchristos return -1; 27798585484eSchristos } 27808585484eSchristos 2781*eabc0478Schristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 2782b8ecfcfeSchristos res = event_del_nolock_(ev, blocking); 2783*eabc0478Schristos EVBASE_RELEASE_LOCK(base, th_base_lock); 27848585484eSchristos 27858585484eSchristos return (res); 27868585484eSchristos } 27878585484eSchristos 27888585484eSchristos int 2789b8ecfcfeSchristos event_del(struct event *ev) 2790b8ecfcfeSchristos { 2791b8ecfcfeSchristos return event_del_(ev, EVENT_DEL_AUTOBLOCK); 2792b8ecfcfeSchristos } 2793b8ecfcfeSchristos 2794b8ecfcfeSchristos int 2795b8ecfcfeSchristos event_del_block(struct event *ev) 2796b8ecfcfeSchristos { 2797b8ecfcfeSchristos return event_del_(ev, EVENT_DEL_BLOCK); 2798b8ecfcfeSchristos } 2799b8ecfcfeSchristos 2800b8ecfcfeSchristos int 2801b8ecfcfeSchristos event_del_noblock(struct event *ev) 2802b8ecfcfeSchristos { 2803b8ecfcfeSchristos return event_del_(ev, EVENT_DEL_NOBLOCK); 2804b8ecfcfeSchristos } 2805b8ecfcfeSchristos 2806b8ecfcfeSchristos /** Helper for event_del: always called with th_base_lock held. 2807b8ecfcfeSchristos * 2808b8ecfcfeSchristos * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK, 2809b8ecfcfeSchristos * EVEN_IF_FINALIZING} values. See those for more information. 
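 *
 * (Editor's sketch, not part of the original comment: the public wrappers
 * above choose the mode for the caller.  Assuming `ev` is an event whose
 * callback might be running in the loop thread right now:
 *
 *	event_del(ev);          // EVENT_DEL_AUTOBLOCK: wait out a callback
 *	                        // running in another thread, unless the
 *	                        // event was added with EV_FINALIZE
 *	event_del_block(ev);    // EVENT_DEL_BLOCK: always wait out such a callback
 *	event_del_noblock(ev);  // EVENT_DEL_NOBLOCK: never wait
 *
 * EVENT_DEL_EVEN_IF_FINALIZING is internal; in this file it is only
 * passed in from event_callback_cancel_nolock_().)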
2810b8ecfcfeSchristos */ 2811b8ecfcfeSchristos int 2812b8ecfcfeSchristos event_del_nolock_(struct event *ev, int blocking) 28138585484eSchristos { 28148585484eSchristos struct event_base *base; 28158585484eSchristos int res = 0, notify = 0; 28168585484eSchristos 28178585484eSchristos event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p", 28188585484eSchristos ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback)); 28198585484eSchristos 28208585484eSchristos /* An event without a base has not been added */ 28218585484eSchristos if (ev->ev_base == NULL) 28228585484eSchristos return (-1); 28238585484eSchristos 28248585484eSchristos EVENT_BASE_ASSERT_LOCKED(ev->ev_base); 28258585484eSchristos 2826b8ecfcfeSchristos if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) { 2827b8ecfcfeSchristos if (ev->ev_flags & EVLIST_FINALIZING) { 2828b8ecfcfeSchristos /* XXXX Debug */ 2829b8ecfcfeSchristos return 0; 2830b8ecfcfeSchristos } 2831b8ecfcfeSchristos } 2832b8ecfcfeSchristos 28338585484eSchristos base = ev->ev_base; 28348585484eSchristos 28358585484eSchristos EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL)); 28368585484eSchristos 28378585484eSchristos /* See if we are just active executing this event in a loop */ 28388585484eSchristos if (ev->ev_events & EV_SIGNAL) { 28398585484eSchristos if (ev->ev_ncalls && ev->ev_pncalls) { 28408585484eSchristos /* Abort loop */ 28418585484eSchristos *ev->ev_pncalls = 0; 28428585484eSchristos } 28438585484eSchristos } 28448585484eSchristos 28458585484eSchristos if (ev->ev_flags & EVLIST_TIMEOUT) { 28468585484eSchristos /* NOTE: We never need to notify the main thread because of a 28478585484eSchristos * deleted timeout event: all that could happen if we don't is 28488585484eSchristos * that the dispatch loop might wake up too early. But the 28498585484eSchristos * point of notifying the main thread _is_ to wake up the 28508585484eSchristos * dispatch loop early anyway, so we wouldn't gain anything by 28518585484eSchristos * doing it. 28528585484eSchristos */ 28538585484eSchristos event_queue_remove_timeout(base, ev); 28548585484eSchristos } 28558585484eSchristos 28568585484eSchristos if (ev->ev_flags & EVLIST_ACTIVE) 28578585484eSchristos event_queue_remove_active(base, event_to_event_callback(ev)); 28588585484eSchristos else if (ev->ev_flags & EVLIST_ACTIVE_LATER) 28598585484eSchristos event_queue_remove_active_later(base, event_to_event_callback(ev)); 28608585484eSchristos 28618585484eSchristos if (ev->ev_flags & EVLIST_INSERTED) { 28628585484eSchristos event_queue_remove_inserted(base, ev); 2863b8ecfcfeSchristos if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)) 28648585484eSchristos res = evmap_io_del_(base, ev->ev_fd, ev); 28658585484eSchristos else 28668585484eSchristos res = evmap_signal_del_(base, (int)ev->ev_fd, ev); 28678585484eSchristos if (res == 1) { 28688585484eSchristos /* evmap says we need to notify the main thread. 
*/ 28698585484eSchristos notify = 1; 28708585484eSchristos res = 0; 28718585484eSchristos } 2872*eabc0478Schristos /* If we do not have events, let's notify event base so it can 2873*eabc0478Schristos * exit without waiting */ 2874*eabc0478Schristos if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) 2875*eabc0478Schristos notify = 1; 28768585484eSchristos } 28778585484eSchristos 28788585484eSchristos /* if we are not in the right thread, we need to wake up the loop */ 28798585484eSchristos if (res != -1 && notify && EVBASE_NEED_NOTIFY(base)) 28808585484eSchristos evthread_notify_base(base); 28818585484eSchristos 28828585484eSchristos event_debug_note_del_(ev); 28838585484eSchristos 2884*eabc0478Schristos /* If the main thread is currently executing this event's callback, 2885*eabc0478Schristos * and we are not the main thread, then we want to wait until the 2886*eabc0478Schristos * callback is done before returning. That way, when this function 2887*eabc0478Schristos * returns, it will be safe to free the user-supplied argument. 2888*eabc0478Schristos */ 2889*eabc0478Schristos #ifndef EVENT__DISABLE_THREAD_SUPPORT 2890*eabc0478Schristos if (blocking != EVENT_DEL_NOBLOCK && 2891*eabc0478Schristos base->current_event == event_to_event_callback(ev) && 2892*eabc0478Schristos !EVBASE_IN_THREAD(base) && 2893*eabc0478Schristos (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) { 2894*eabc0478Schristos ++base->current_event_waiters; 2895*eabc0478Schristos EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); 2896*eabc0478Schristos } 2897*eabc0478Schristos #endif 2898*eabc0478Schristos 28998585484eSchristos return (res); 29008585484eSchristos } 29018585484eSchristos 29028585484eSchristos void 29038585484eSchristos event_active(struct event *ev, int res, short ncalls) 29048585484eSchristos { 29058585484eSchristos if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { 29068585484eSchristos event_warnx("%s: event has no event_base set.", __func__); 29078585484eSchristos return; 29088585484eSchristos } 29098585484eSchristos 29108585484eSchristos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); 29118585484eSchristos 29128585484eSchristos event_debug_assert_is_setup_(ev); 29138585484eSchristos 29148585484eSchristos event_active_nolock_(ev, res, ncalls); 29158585484eSchristos 29168585484eSchristos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); 29178585484eSchristos } 29188585484eSchristos 29198585484eSchristos 29208585484eSchristos void 29218585484eSchristos event_active_nolock_(struct event *ev, int res, short ncalls) 29228585484eSchristos { 29238585484eSchristos struct event_base *base; 29248585484eSchristos 29258585484eSchristos event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p", 29268585484eSchristos ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback)); 29278585484eSchristos 29288585484eSchristos base = ev->ev_base; 29298585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 29308585484eSchristos 2931b8ecfcfeSchristos if (ev->ev_flags & EVLIST_FINALIZING) { 2932b8ecfcfeSchristos /* XXXX debug */ 2933b8ecfcfeSchristos return; 2934b8ecfcfeSchristos } 2935b8ecfcfeSchristos 29368585484eSchristos switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { 29378585484eSchristos default: 29388585484eSchristos case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER: 29398585484eSchristos EVUTIL_ASSERT(0); 29408585484eSchristos break; 29418585484eSchristos case EVLIST_ACTIVE: 29428585484eSchristos /* We get different kinds of events, add them together */ 29438585484eSchristos 
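/* (Editor's note: the event is already on an active queue, so the new
 * result bits are simply OR'd into ev_res below and the function returns
 * without queueing it a second time; the callback sees the combined
 * result.) */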
ev->ev_res |= res; 29448585484eSchristos return; 29458585484eSchristos case EVLIST_ACTIVE_LATER: 29468585484eSchristos ev->ev_res |= res; 29478585484eSchristos break; 29488585484eSchristos case 0: 29498585484eSchristos ev->ev_res = res; 29508585484eSchristos break; 29518585484eSchristos } 29528585484eSchristos 29538585484eSchristos if (ev->ev_pri < base->event_running_priority) 29548585484eSchristos base->event_continue = 1; 29558585484eSchristos 29568585484eSchristos if (ev->ev_events & EV_SIGNAL) { 29578585484eSchristos #ifndef EVENT__DISABLE_THREAD_SUPPORT 29588585484eSchristos if (base->current_event == event_to_event_callback(ev) && 29598585484eSchristos !EVBASE_IN_THREAD(base)) { 29608585484eSchristos ++base->current_event_waiters; 29618585484eSchristos EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock); 29628585484eSchristos } 29638585484eSchristos #endif 29648585484eSchristos ev->ev_ncalls = ncalls; 29658585484eSchristos ev->ev_pncalls = NULL; 29668585484eSchristos } 29678585484eSchristos 29688585484eSchristos event_callback_activate_nolock_(base, event_to_event_callback(ev)); 29698585484eSchristos } 29708585484eSchristos 29718585484eSchristos void 29728585484eSchristos event_active_later_(struct event *ev, int res) 29738585484eSchristos { 29748585484eSchristos EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); 29758585484eSchristos event_active_later_nolock_(ev, res); 29768585484eSchristos EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); 29778585484eSchristos } 29788585484eSchristos 29798585484eSchristos void 29808585484eSchristos event_active_later_nolock_(struct event *ev, int res) 29818585484eSchristos { 29828585484eSchristos struct event_base *base = ev->ev_base; 29838585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 29848585484eSchristos 29858585484eSchristos if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) { 29868585484eSchristos /* We get different kinds of events, add them together */ 29878585484eSchristos ev->ev_res |= res; 29888585484eSchristos return; 29898585484eSchristos } 29908585484eSchristos 29918585484eSchristos ev->ev_res = res; 29928585484eSchristos 29938585484eSchristos event_callback_activate_later_nolock_(base, event_to_event_callback(ev)); 29948585484eSchristos } 29958585484eSchristos 29968585484eSchristos int 29978585484eSchristos event_callback_activate_(struct event_base *base, 29988585484eSchristos struct event_callback *evcb) 29998585484eSchristos { 30008585484eSchristos int r; 30018585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 30028585484eSchristos r = event_callback_activate_nolock_(base, evcb); 30038585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 30048585484eSchristos return r; 30058585484eSchristos } 30068585484eSchristos 30078585484eSchristos int 30088585484eSchristos event_callback_activate_nolock_(struct event_base *base, 30098585484eSchristos struct event_callback *evcb) 30108585484eSchristos { 30118585484eSchristos int r = 1; 30128585484eSchristos 3013b8ecfcfeSchristos if (evcb->evcb_flags & EVLIST_FINALIZING) 3014b8ecfcfeSchristos return 0; 3015b8ecfcfeSchristos 30168585484eSchristos switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) { 30178585484eSchristos default: 30188585484eSchristos EVUTIL_ASSERT(0); 3019*eabc0478Schristos EVUTIL_FALLTHROUGH; 30208585484eSchristos case EVLIST_ACTIVE_LATER: 30218585484eSchristos event_queue_remove_active_later(base, evcb); 30228585484eSchristos r = 0; 30238585484eSchristos break; 30248585484eSchristos case EVLIST_ACTIVE: 30258585484eSchristos return 0; 
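/* (Editor's note: a callback that is already on an active queue is not
 * queued twice; the 0 return lets callers such as
 * event_deferred_cb_schedule_() know that no new activation happened.) */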
30268585484eSchristos case 0: 30278585484eSchristos break; 30288585484eSchristos } 30298585484eSchristos 30308585484eSchristos event_queue_insert_active(base, evcb); 30318585484eSchristos 30328585484eSchristos if (EVBASE_NEED_NOTIFY(base)) 30338585484eSchristos evthread_notify_base(base); 30348585484eSchristos 30358585484eSchristos return r; 30368585484eSchristos } 30378585484eSchristos 3038*eabc0478Schristos int 30398585484eSchristos event_callback_activate_later_nolock_(struct event_base *base, 30408585484eSchristos struct event_callback *evcb) 30418585484eSchristos { 30428585484eSchristos if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) 3043*eabc0478Schristos return 0; 30448585484eSchristos 30458585484eSchristos event_queue_insert_active_later(base, evcb); 30468585484eSchristos if (EVBASE_NEED_NOTIFY(base)) 30478585484eSchristos evthread_notify_base(base); 3048*eabc0478Schristos return 1; 30498585484eSchristos } 30508585484eSchristos 30518585484eSchristos void 30528585484eSchristos event_callback_init_(struct event_base *base, 30538585484eSchristos struct event_callback *cb) 30548585484eSchristos { 30558585484eSchristos memset(cb, 0, sizeof(*cb)); 30568585484eSchristos cb->evcb_pri = base->nactivequeues - 1; 30578585484eSchristos } 30588585484eSchristos 30598585484eSchristos int 30608585484eSchristos event_callback_cancel_(struct event_base *base, 30618585484eSchristos struct event_callback *evcb) 30628585484eSchristos { 30638585484eSchristos int r; 30648585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 3065b8ecfcfeSchristos r = event_callback_cancel_nolock_(base, evcb, 0); 30668585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 30678585484eSchristos return r; 30688585484eSchristos } 30698585484eSchristos 30708585484eSchristos int 30718585484eSchristos event_callback_cancel_nolock_(struct event_base *base, 3072b8ecfcfeSchristos struct event_callback *evcb, int even_if_finalizing) 30738585484eSchristos { 3074b8ecfcfeSchristos if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing) 3075b8ecfcfeSchristos return 0; 3076b8ecfcfeSchristos 30778585484eSchristos if (evcb->evcb_flags & EVLIST_INIT) 3078b8ecfcfeSchristos return event_del_nolock_(event_callback_to_event(evcb), 3079b8ecfcfeSchristos even_if_finalizing ? 
EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK); 30808585484eSchristos 30818585484eSchristos switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) { 30828585484eSchristos default: 30838585484eSchristos case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER: 30848585484eSchristos EVUTIL_ASSERT(0); 30858585484eSchristos break; 30868585484eSchristos case EVLIST_ACTIVE: 30878585484eSchristos /* We get different kinds of events, add them together */ 30888585484eSchristos event_queue_remove_active(base, evcb); 30898585484eSchristos return 0; 30908585484eSchristos case EVLIST_ACTIVE_LATER: 30918585484eSchristos event_queue_remove_active_later(base, evcb); 30928585484eSchristos break; 30938585484eSchristos case 0: 30948585484eSchristos break; 30958585484eSchristos } 30968585484eSchristos 30978585484eSchristos return 0; 30988585484eSchristos } 30998585484eSchristos 31008585484eSchristos void 31018585484eSchristos event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg) 31028585484eSchristos { 31038585484eSchristos memset(cb, 0, sizeof(*cb)); 31048585484eSchristos cb->evcb_cb_union.evcb_selfcb = fn; 31058585484eSchristos cb->evcb_arg = arg; 31068585484eSchristos cb->evcb_pri = priority; 31078585484eSchristos cb->evcb_closure = EV_CLOSURE_CB_SELF; 31088585484eSchristos } 31098585484eSchristos 31108585484eSchristos void 31118585484eSchristos event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority) 31128585484eSchristos { 31138585484eSchristos cb->evcb_pri = priority; 31148585484eSchristos } 31158585484eSchristos 31168585484eSchristos void 31178585484eSchristos event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb) 31188585484eSchristos { 31198585484eSchristos if (!base) 31208585484eSchristos base = current_base; 31218585484eSchristos event_callback_cancel_(base, cb); 31228585484eSchristos } 31238585484eSchristos 31248585484eSchristos #define MAX_DEFERREDS_QUEUED 32 31258585484eSchristos int 31268585484eSchristos event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb) 31278585484eSchristos { 31288585484eSchristos int r = 1; 31298585484eSchristos if (!base) 31308585484eSchristos base = current_base; 31318585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 31328585484eSchristos if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) { 3133*eabc0478Schristos r = event_callback_activate_later_nolock_(base, cb); 31348585484eSchristos } else { 31358585484eSchristos r = event_callback_activate_nolock_(base, cb); 3136*eabc0478Schristos if (r) { 3137*eabc0478Schristos ++base->n_deferreds_queued; 3138*eabc0478Schristos } 31398585484eSchristos } 31408585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 31418585484eSchristos return r; 31428585484eSchristos } 31438585484eSchristos 31448585484eSchristos static int 31458585484eSchristos timeout_next(struct event_base *base, struct timeval **tv_p) 31468585484eSchristos { 31478585484eSchristos /* Caller must hold th_base_lock */ 31488585484eSchristos struct timeval now; 31498585484eSchristos struct event *ev; 31508585484eSchristos struct timeval *tv = *tv_p; 31518585484eSchristos int res = 0; 31528585484eSchristos 31538585484eSchristos ev = min_heap_top_(&base->timeheap); 31548585484eSchristos 31558585484eSchristos if (ev == NULL) { 31568585484eSchristos /* if no time-based events are active wait for I/O */ 31578585484eSchristos *tv_p = NULL; 31588585484eSchristos goto out; 31598585484eSchristos } 31608585484eSchristos 31618585484eSchristos if 
(gettime(base, &now) == -1) { 31628585484eSchristos res = -1; 31638585484eSchristos goto out; 31648585484eSchristos } 31658585484eSchristos 31668585484eSchristos if (evutil_timercmp(&ev->ev_timeout, &now, <=)) { 31678585484eSchristos evutil_timerclear(tv); 31688585484eSchristos goto out; 31698585484eSchristos } 31708585484eSchristos 31718585484eSchristos evutil_timersub(&ev->ev_timeout, &now, tv); 31728585484eSchristos 31738585484eSchristos EVUTIL_ASSERT(tv->tv_sec >= 0); 31748585484eSchristos EVUTIL_ASSERT(tv->tv_usec >= 0); 31758585484eSchristos event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec)); 31768585484eSchristos 31778585484eSchristos out: 31788585484eSchristos return (res); 31798585484eSchristos } 31808585484eSchristos 31818585484eSchristos /* Activate every event whose timeout has elapsed. */ 31828585484eSchristos static void 31838585484eSchristos timeout_process(struct event_base *base) 31848585484eSchristos { 31858585484eSchristos /* Caller must hold lock. */ 31868585484eSchristos struct timeval now; 31878585484eSchristos struct event *ev; 31888585484eSchristos 31898585484eSchristos if (min_heap_empty_(&base->timeheap)) { 31908585484eSchristos return; 31918585484eSchristos } 31928585484eSchristos 31938585484eSchristos gettime(base, &now); 31948585484eSchristos 31958585484eSchristos while ((ev = min_heap_top_(&base->timeheap))) { 31968585484eSchristos if (evutil_timercmp(&ev->ev_timeout, &now, >)) 31978585484eSchristos break; 31988585484eSchristos 31998585484eSchristos /* delete this event from the I/O queues */ 3200b8ecfcfeSchristos event_del_nolock_(ev, EVENT_DEL_NOBLOCK); 32018585484eSchristos 32028585484eSchristos event_debug(("timeout_process: event: %p, call %p", 32038585484eSchristos ev, ev->ev_callback)); 32048585484eSchristos event_active_nolock_(ev, EV_TIMEOUT, 1); 32058585484eSchristos } 32068585484eSchristos } 32078585484eSchristos 3208b8ecfcfeSchristos #ifndef MAX 3209b8ecfcfeSchristos #define MAX(a,b) (((a)>(b))?(a):(b)) 3210b8ecfcfeSchristos #endif 3211b8ecfcfeSchristos 3212b8ecfcfeSchristos #define MAX_EVENT_COUNT(var, v) var = MAX(var, v) 3213b8ecfcfeSchristos 32148585484eSchristos /* These are a fancy way to spell 3215*eabc0478Schristos if (~flags & EVLIST_INTERNAL) 32168585484eSchristos base->event_count--/++; 32178585484eSchristos */ 32188585484eSchristos #define DECR_EVENT_COUNT(base,flags) \ 3219*eabc0478Schristos ((base)->event_count -= !((flags) & EVLIST_INTERNAL)) 3220b8ecfcfeSchristos #define INCR_EVENT_COUNT(base,flags) do { \ 3221*eabc0478Schristos ((base)->event_count += !((flags) & EVLIST_INTERNAL)); \ 3222b8ecfcfeSchristos MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \ 3223b8ecfcfeSchristos } while (0) 32248585484eSchristos 32258585484eSchristos static void 32268585484eSchristos event_queue_remove_inserted(struct event_base *base, struct event *ev) 32278585484eSchristos { 32288585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 32298585484eSchristos if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) { 32308585484eSchristos event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__, 32318585484eSchristos ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED); 32328585484eSchristos return; 32338585484eSchristos } 32348585484eSchristos DECR_EVENT_COUNT(base, ev->ev_flags); 32358585484eSchristos ev->ev_flags &= ~EVLIST_INSERTED; 32368585484eSchristos } 32378585484eSchristos static void 32388585484eSchristos event_queue_remove_active(struct event_base *base, struct 
event_callback *evcb) 32398585484eSchristos { 32408585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 32418585484eSchristos if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) { 32428585484eSchristos event_errx(1, "%s: %p not on queue %x", __func__, 32438585484eSchristos evcb, EVLIST_ACTIVE); 32448585484eSchristos return; 32458585484eSchristos } 32468585484eSchristos DECR_EVENT_COUNT(base, evcb->evcb_flags); 32478585484eSchristos evcb->evcb_flags &= ~EVLIST_ACTIVE; 32488585484eSchristos base->event_count_active--; 32498585484eSchristos 32508585484eSchristos TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri], 32518585484eSchristos evcb, evcb_active_next); 32528585484eSchristos } 32538585484eSchristos static void 32548585484eSchristos event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb) 32558585484eSchristos { 32568585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 32578585484eSchristos if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) { 32588585484eSchristos event_errx(1, "%s: %p not on queue %x", __func__, 32598585484eSchristos evcb, EVLIST_ACTIVE_LATER); 32608585484eSchristos return; 32618585484eSchristos } 32628585484eSchristos DECR_EVENT_COUNT(base, evcb->evcb_flags); 32638585484eSchristos evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER; 32648585484eSchristos base->event_count_active--; 32658585484eSchristos 32668585484eSchristos TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next); 32678585484eSchristos } 32688585484eSchristos static void 32698585484eSchristos event_queue_remove_timeout(struct event_base *base, struct event *ev) 32708585484eSchristos { 32718585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 32728585484eSchristos if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) { 32738585484eSchristos event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__, 32748585484eSchristos ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT); 32758585484eSchristos return; 32768585484eSchristos } 32778585484eSchristos DECR_EVENT_COUNT(base, ev->ev_flags); 32788585484eSchristos ev->ev_flags &= ~EVLIST_TIMEOUT; 32798585484eSchristos 32808585484eSchristos if (is_common_timeout(&ev->ev_timeout, base)) { 32818585484eSchristos struct common_timeout_list *ctl = 32828585484eSchristos get_common_timeout_list(base, &ev->ev_timeout); 32838585484eSchristos TAILQ_REMOVE(&ctl->events, ev, 32848585484eSchristos ev_timeout_pos.ev_next_with_common_timeout); 32858585484eSchristos } else { 32868585484eSchristos min_heap_erase_(&base->timeheap, ev); 32878585484eSchristos } 32888585484eSchristos } 32898585484eSchristos 32908585484eSchristos #ifdef USE_REINSERT_TIMEOUT 32918585484eSchristos /* Remove and reinsert 'ev' into the timeout queue. 
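 * (Editor's note: the switch below is keyed on (was_common<<1)|is_common,
 * i.e. whether the old and the new timeout live on a common-timeout list
 * or on the min-heap; the event is moved between the two structures, or
 * just repositioned when the structure it lives in does not change.)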
*/ 32928585484eSchristos static void 32938585484eSchristos event_queue_reinsert_timeout(struct event_base *base, struct event *ev, 32948585484eSchristos int was_common, int is_common, int old_timeout_idx) 32958585484eSchristos { 32968585484eSchristos struct common_timeout_list *ctl; 32978585484eSchristos if (!(ev->ev_flags & EVLIST_TIMEOUT)) { 32988585484eSchristos event_queue_insert_timeout(base, ev); 32998585484eSchristos return; 33008585484eSchristos } 33018585484eSchristos 33028585484eSchristos switch ((was_common<<1) | is_common) { 33038585484eSchristos case 3: /* Changing from one common timeout to another */ 33048585484eSchristos ctl = base->common_timeout_queues[old_timeout_idx]; 33058585484eSchristos TAILQ_REMOVE(&ctl->events, ev, 33068585484eSchristos ev_timeout_pos.ev_next_with_common_timeout); 33078585484eSchristos ctl = get_common_timeout_list(base, &ev->ev_timeout); 33088585484eSchristos insert_common_timeout_inorder(ctl, ev); 33098585484eSchristos break; 33108585484eSchristos case 2: /* Was common; is no longer common */ 33118585484eSchristos ctl = base->common_timeout_queues[old_timeout_idx]; 33128585484eSchristos TAILQ_REMOVE(&ctl->events, ev, 33138585484eSchristos ev_timeout_pos.ev_next_with_common_timeout); 33148585484eSchristos min_heap_push_(&base->timeheap, ev); 33158585484eSchristos break; 33168585484eSchristos case 1: /* Wasn't common; has become common. */ 33178585484eSchristos min_heap_erase_(&base->timeheap, ev); 33188585484eSchristos ctl = get_common_timeout_list(base, &ev->ev_timeout); 33198585484eSchristos insert_common_timeout_inorder(ctl, ev); 33208585484eSchristos break; 33218585484eSchristos case 0: /* was in heap; is still on heap. */ 33228585484eSchristos min_heap_adjust_(&base->timeheap, ev); 33238585484eSchristos break; 33248585484eSchristos default: 33258585484eSchristos EVUTIL_ASSERT(0); /* unreachable */ 33268585484eSchristos break; 33278585484eSchristos } 33288585484eSchristos } 33298585484eSchristos #endif 33308585484eSchristos 33318585484eSchristos /* Add 'ev' to the common timeout list in 'ev'. */ 33328585484eSchristos static void 33338585484eSchristos insert_common_timeout_inorder(struct common_timeout_list *ctl, 33348585484eSchristos struct event *ev) 33358585484eSchristos { 33368585484eSchristos struct event *e; 33378585484eSchristos /* By all logic, we should just be able to append 'ev' to the end of 33388585484eSchristos * ctl->events, since the timeout on each 'ev' is set to {the common 33398585484eSchristos * timeout} + {the time when we add the event}, and so the events 33408585484eSchristos * should arrive in order of their timeeouts. But just in case 33418585484eSchristos * there's some wacky threading issue going on, we do a search from 33428585484eSchristos * the end of 'ev' to find the right insertion point. 33438585484eSchristos */ 33448585484eSchristos TAILQ_FOREACH_REVERSE(e, &ctl->events, 33458585484eSchristos event_list, ev_timeout_pos.ev_next_with_common_timeout) { 33468585484eSchristos /* This timercmp is a little sneaky, since both ev and e have 33478585484eSchristos * magic values in tv_usec. Fortunately, they ought to have 33488585484eSchristos * the _same_ magic values in tv_usec. Let's assert for that. 
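 * (Editor's note: the "magic values" are the common-timeout encoding used
 * elsewhere in this file: the bits of tv_usec selected by MICROSECONDS_MASK
 * hold the real microseconds, while the remaining upper bits carry a tag
 * identifying the owning common-timeout queue.  Events on the same list
 * therefore share those upper bits, which is what the assertion below
 * relies on.)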
33498585484eSchristos */ 33508585484eSchristos EVUTIL_ASSERT( 33518585484eSchristos is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout)); 33528585484eSchristos if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) { 33538585484eSchristos TAILQ_INSERT_AFTER(&ctl->events, e, ev, 33548585484eSchristos ev_timeout_pos.ev_next_with_common_timeout); 33558585484eSchristos return; 33568585484eSchristos } 33578585484eSchristos } 33588585484eSchristos TAILQ_INSERT_HEAD(&ctl->events, ev, 33598585484eSchristos ev_timeout_pos.ev_next_with_common_timeout); 33608585484eSchristos } 33618585484eSchristos 33628585484eSchristos static void 33638585484eSchristos event_queue_insert_inserted(struct event_base *base, struct event *ev) 33648585484eSchristos { 33658585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 33668585484eSchristos 33678585484eSchristos if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) { 33688585484eSchristos event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__, 33698585484eSchristos ev, EV_SOCK_ARG(ev->ev_fd)); 33708585484eSchristos return; 33718585484eSchristos } 33728585484eSchristos 33738585484eSchristos INCR_EVENT_COUNT(base, ev->ev_flags); 33748585484eSchristos 33758585484eSchristos ev->ev_flags |= EVLIST_INSERTED; 33768585484eSchristos } 33778585484eSchristos 33788585484eSchristos static void 33798585484eSchristos event_queue_insert_active(struct event_base *base, struct event_callback *evcb) 33808585484eSchristos { 33818585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 33828585484eSchristos 33838585484eSchristos if (evcb->evcb_flags & EVLIST_ACTIVE) { 33848585484eSchristos /* Double insertion is possible for active events */ 33858585484eSchristos return; 33868585484eSchristos } 33878585484eSchristos 33888585484eSchristos INCR_EVENT_COUNT(base, evcb->evcb_flags); 33898585484eSchristos 33908585484eSchristos evcb->evcb_flags |= EVLIST_ACTIVE; 33918585484eSchristos 33928585484eSchristos base->event_count_active++; 3393b8ecfcfeSchristos MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active); 33948585484eSchristos EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); 33958585484eSchristos TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], 33968585484eSchristos evcb, evcb_active_next); 33978585484eSchristos } 33988585484eSchristos 33998585484eSchristos static void 34008585484eSchristos event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb) 34018585484eSchristos { 34028585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 34038585484eSchristos if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) { 34048585484eSchristos /* Double insertion is possible */ 34058585484eSchristos return; 34068585484eSchristos } 34078585484eSchristos 34088585484eSchristos INCR_EVENT_COUNT(base, evcb->evcb_flags); 34098585484eSchristos evcb->evcb_flags |= EVLIST_ACTIVE_LATER; 34108585484eSchristos base->event_count_active++; 3411b8ecfcfeSchristos MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active); 34128585484eSchristos EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); 34138585484eSchristos TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next); 34148585484eSchristos } 34158585484eSchristos 34168585484eSchristos static void 34178585484eSchristos event_queue_insert_timeout(struct event_base *base, struct event *ev) 34188585484eSchristos { 34198585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 34208585484eSchristos 34218585484eSchristos if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) { 
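/* (Editor's note: inserting an event that is already on the timeout queue
 * indicates a bookkeeping bug in the caller, so it is reported as an
 * error rather than silently ignored.) */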
34228585484eSchristos event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__, 34238585484eSchristos ev, EV_SOCK_ARG(ev->ev_fd)); 34248585484eSchristos return; 34258585484eSchristos } 34268585484eSchristos 34278585484eSchristos INCR_EVENT_COUNT(base, ev->ev_flags); 34288585484eSchristos 34298585484eSchristos ev->ev_flags |= EVLIST_TIMEOUT; 34308585484eSchristos 34318585484eSchristos if (is_common_timeout(&ev->ev_timeout, base)) { 34328585484eSchristos struct common_timeout_list *ctl = 34338585484eSchristos get_common_timeout_list(base, &ev->ev_timeout); 34348585484eSchristos insert_common_timeout_inorder(ctl, ev); 34358585484eSchristos } else { 34368585484eSchristos min_heap_push_(&base->timeheap, ev); 34378585484eSchristos } 34388585484eSchristos } 34398585484eSchristos 34408585484eSchristos static void 34418585484eSchristos event_queue_make_later_events_active(struct event_base *base) 34428585484eSchristos { 34438585484eSchristos struct event_callback *evcb; 34448585484eSchristos EVENT_BASE_ASSERT_LOCKED(base); 34458585484eSchristos 34468585484eSchristos while ((evcb = TAILQ_FIRST(&base->active_later_queue))) { 34478585484eSchristos TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next); 34488585484eSchristos evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE; 34498585484eSchristos EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues); 34508585484eSchristos TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next); 34518585484eSchristos base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF); 34528585484eSchristos } 34538585484eSchristos } 34548585484eSchristos 34558585484eSchristos /* Functions for debugging */ 34568585484eSchristos 34578585484eSchristos const char * 34588585484eSchristos event_get_version(void) 34598585484eSchristos { 34608585484eSchristos return (EVENT__VERSION); 34618585484eSchristos } 34628585484eSchristos 34638585484eSchristos ev_uint32_t 34648585484eSchristos event_get_version_number(void) 34658585484eSchristos { 34668585484eSchristos return (EVENT__NUMERIC_VERSION); 34678585484eSchristos } 34688585484eSchristos 34698585484eSchristos /* 34708585484eSchristos * No thread-safe interface needed - the information should be the same 34718585484eSchristos * for all threads. 
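 *
 * (Editor's sketch of typical use of these introspection helpers; the
 * fprintf destination is only an example, and event_get_method() reads
 * the global current_base, so it assumes a default base has already been
 * set up:
 *
 *	fprintf(stderr, "libevent %s (0x%08x) via %s\n",
 *	    event_get_version(),
 *	    (unsigned)event_get_version_number(),
 *	    event_get_method());
 * )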
34728585484eSchristos */ 34738585484eSchristos 34748585484eSchristos const char * 34758585484eSchristos event_get_method(void) 34768585484eSchristos { 34778585484eSchristos return (current_base->evsel->name); 34788585484eSchristos } 34798585484eSchristos 34808585484eSchristos #ifndef EVENT__DISABLE_MM_REPLACEMENT 34818585484eSchristos static void *(*mm_malloc_fn_)(size_t sz) = NULL; 34828585484eSchristos static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL; 34838585484eSchristos static void (*mm_free_fn_)(void *p) = NULL; 34848585484eSchristos 34858585484eSchristos void * 34868585484eSchristos event_mm_malloc_(size_t sz) 34878585484eSchristos { 34888585484eSchristos if (sz == 0) 34898585484eSchristos return NULL; 34908585484eSchristos 34918585484eSchristos if (mm_malloc_fn_) 34928585484eSchristos return mm_malloc_fn_(sz); 34938585484eSchristos else 34948585484eSchristos return malloc(sz); 34958585484eSchristos } 34968585484eSchristos 34978585484eSchristos void * 34988585484eSchristos event_mm_calloc_(size_t count, size_t size) 34998585484eSchristos { 35008585484eSchristos if (count == 0 || size == 0) 35018585484eSchristos return NULL; 35028585484eSchristos 35038585484eSchristos if (mm_malloc_fn_) { 35048585484eSchristos size_t sz = count * size; 35058585484eSchristos void *p = NULL; 35068585484eSchristos if (count > EV_SIZE_MAX / size) 35078585484eSchristos goto error; 35088585484eSchristos p = mm_malloc_fn_(sz); 35098585484eSchristos if (p) 35108585484eSchristos return memset(p, 0, sz); 35118585484eSchristos } else { 35128585484eSchristos void *p = calloc(count, size); 35138585484eSchristos #ifdef _WIN32 35148585484eSchristos /* Windows calloc doesn't reliably set ENOMEM */ 35158585484eSchristos if (p == NULL) 35168585484eSchristos goto error; 35178585484eSchristos #endif 35188585484eSchristos return p; 35198585484eSchristos } 35208585484eSchristos 35218585484eSchristos error: 35228585484eSchristos errno = ENOMEM; 35238585484eSchristos return NULL; 35248585484eSchristos } 35258585484eSchristos 35268585484eSchristos char * 35278585484eSchristos event_mm_strdup_(const char *str) 35288585484eSchristos { 35298585484eSchristos if (!str) { 35308585484eSchristos errno = EINVAL; 35318585484eSchristos return NULL; 35328585484eSchristos } 35338585484eSchristos 35348585484eSchristos if (mm_malloc_fn_) { 35358585484eSchristos size_t ln = strlen(str); 35368585484eSchristos void *p = NULL; 35378585484eSchristos if (ln == EV_SIZE_MAX) 35388585484eSchristos goto error; 35398585484eSchristos p = mm_malloc_fn_(ln+1); 35408585484eSchristos if (p) 35418585484eSchristos return memcpy(p, str, ln+1); 35428585484eSchristos } else 35438585484eSchristos #ifdef _WIN32 35448585484eSchristos return _strdup(str); 35458585484eSchristos #else 35468585484eSchristos return strdup(str); 35478585484eSchristos #endif 35488585484eSchristos 35498585484eSchristos error: 35508585484eSchristos errno = ENOMEM; 35518585484eSchristos return NULL; 35528585484eSchristos } 35538585484eSchristos 35548585484eSchristos void * 35558585484eSchristos event_mm_realloc_(void *ptr, size_t sz) 35568585484eSchristos { 35578585484eSchristos if (mm_realloc_fn_) 35588585484eSchristos return mm_realloc_fn_(ptr, sz); 35598585484eSchristos else 35608585484eSchristos return realloc(ptr, sz); 35618585484eSchristos } 35628585484eSchristos 35638585484eSchristos void 35648585484eSchristos event_mm_free_(void *ptr) 35658585484eSchristos { 35668585484eSchristos if (mm_free_fn_) 35678585484eSchristos mm_free_fn_(ptr); 35688585484eSchristos else 
35698585484eSchristos free(ptr); 35708585484eSchristos } 35718585484eSchristos 35728585484eSchristos void 35738585484eSchristos event_set_mem_functions(void *(*malloc_fn)(size_t sz), 35748585484eSchristos void *(*realloc_fn)(void *ptr, size_t sz), 35758585484eSchristos void (*free_fn)(void *ptr)) 35768585484eSchristos { 35778585484eSchristos mm_malloc_fn_ = malloc_fn; 35788585484eSchristos mm_realloc_fn_ = realloc_fn; 35798585484eSchristos mm_free_fn_ = free_fn; 35808585484eSchristos } 35818585484eSchristos #endif 35828585484eSchristos 35838585484eSchristos #ifdef EVENT__HAVE_EVENTFD 35848585484eSchristos static void 35858585484eSchristos evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg) 35868585484eSchristos { 35878585484eSchristos ev_uint64_t msg; 35888585484eSchristos ev_ssize_t r; 35898585484eSchristos struct event_base *base = arg; 35908585484eSchristos 35918585484eSchristos r = read(fd, (void*) &msg, sizeof(msg)); 35928585484eSchristos if (r<0 && errno != EAGAIN) { 35938585484eSchristos event_sock_warn(fd, "Error reading from eventfd"); 35948585484eSchristos } 35958585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 35968585484eSchristos base->is_notify_pending = 0; 35978585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 35988585484eSchristos } 35998585484eSchristos #endif 36008585484eSchristos 36018585484eSchristos static void 36028585484eSchristos evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg) 36038585484eSchristos { 36048585484eSchristos unsigned char buf[1024]; 36058585484eSchristos struct event_base *base = arg; 36068585484eSchristos #ifdef _WIN32 36078585484eSchristos while (recv(fd, (char*)buf, sizeof(buf), 0) > 0) 36088585484eSchristos ; 36098585484eSchristos #else 36108585484eSchristos while (read(fd, (char*)buf, sizeof(buf)) > 0) 36118585484eSchristos ; 36128585484eSchristos #endif 36138585484eSchristos 36148585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 36158585484eSchristos base->is_notify_pending = 0; 36168585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 36178585484eSchristos } 36188585484eSchristos 36198585484eSchristos int 36208585484eSchristos evthread_make_base_notifiable(struct event_base *base) 36218585484eSchristos { 36228585484eSchristos int r; 36238585484eSchristos if (!base) 36248585484eSchristos return -1; 36258585484eSchristos 36268585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 36278585484eSchristos r = evthread_make_base_notifiable_nolock_(base); 36288585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 36298585484eSchristos return r; 36308585484eSchristos } 36318585484eSchristos 36328585484eSchristos static int 36338585484eSchristos evthread_make_base_notifiable_nolock_(struct event_base *base) 36348585484eSchristos { 36358585484eSchristos void (*cb)(evutil_socket_t, short, void *); 36368585484eSchristos int (*notify)(struct event_base *); 36378585484eSchristos 36388585484eSchristos if (base->th_notify_fn != NULL) { 36398585484eSchristos /* The base is already notifiable: we're doing fine. */ 36408585484eSchristos return 0; 36418585484eSchristos } 36428585484eSchristos 36438585484eSchristos #if defined(EVENT__HAVE_WORKING_KQUEUE) 36448585484eSchristos if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) { 36458585484eSchristos base->th_notify_fn = event_kq_notify_base_; 36468585484eSchristos /* No need to add an event here; the backend can wake 36478585484eSchristos * itself up just fine. 
*/ 36488585484eSchristos return 0; 36498585484eSchristos } 36508585484eSchristos #endif 36518585484eSchristos 36528585484eSchristos #ifdef EVENT__HAVE_EVENTFD 36538585484eSchristos base->th_notify_fd[0] = evutil_eventfd_(0, 36548585484eSchristos EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK); 36558585484eSchristos if (base->th_notify_fd[0] >= 0) { 36568585484eSchristos base->th_notify_fd[1] = -1; 36578585484eSchristos notify = evthread_notify_base_eventfd; 36588585484eSchristos cb = evthread_notify_drain_eventfd; 36598585484eSchristos } else 36608585484eSchristos #endif 36618585484eSchristos if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) { 36628585484eSchristos notify = evthread_notify_base_default; 36638585484eSchristos cb = evthread_notify_drain_default; 36648585484eSchristos } else { 36658585484eSchristos return -1; 36668585484eSchristos } 36678585484eSchristos 36688585484eSchristos base->th_notify_fn = notify; 36698585484eSchristos 36708585484eSchristos /* prepare an event that we can use for wakeup */ 36718585484eSchristos event_assign(&base->th_notify, base, base->th_notify_fd[0], 36728585484eSchristos EV_READ|EV_PERSIST, cb, base); 36738585484eSchristos 36748585484eSchristos /* we need to mark this as internal event */ 36758585484eSchristos base->th_notify.ev_flags |= EVLIST_INTERNAL; 36768585484eSchristos event_priority_set(&base->th_notify, 0); 36778585484eSchristos 36788585484eSchristos return event_add_nolock_(&base->th_notify, NULL, 0); 36798585484eSchristos } 36808585484eSchristos 36818585484eSchristos int 36828585484eSchristos event_base_foreach_event_nolock_(struct event_base *base, 36838585484eSchristos event_base_foreach_event_cb fn, void *arg) 36848585484eSchristos { 36858585484eSchristos int r, i; 36868585484eSchristos unsigned u; 36878585484eSchristos struct event *ev; 36888585484eSchristos 36898585484eSchristos /* Start out with all the EVLIST_INSERTED events. */ 36908585484eSchristos if ((r = evmap_foreach_event_(base, fn, arg))) 36918585484eSchristos return r; 36928585484eSchristos 36938585484eSchristos /* Okay, now we deal with those events that have timeouts and are in 36948585484eSchristos * the min-heap. */ 36958585484eSchristos for (u = 0; u < base->timeheap.n; ++u) { 36968585484eSchristos ev = base->timeheap.p[u]; 36978585484eSchristos if (ev->ev_flags & EVLIST_INSERTED) { 36988585484eSchristos /* we already processed this one */ 36998585484eSchristos continue; 37008585484eSchristos } 37018585484eSchristos if ((r = fn(base, ev, arg))) 37028585484eSchristos return r; 37038585484eSchristos } 37048585484eSchristos 37058585484eSchristos /* Now for the events in one of the timeout queues. 37068585484eSchristos * the min-heap. */ 37078585484eSchristos for (i = 0; i < base->n_common_timeouts; ++i) { 37088585484eSchristos struct common_timeout_list *ctl = 37098585484eSchristos base->common_timeout_queues[i]; 37108585484eSchristos TAILQ_FOREACH(ev, &ctl->events, 37118585484eSchristos ev_timeout_pos.ev_next_with_common_timeout) { 37128585484eSchristos if (ev->ev_flags & EVLIST_INSERTED) { 37138585484eSchristos /* we already processed this one */ 37148585484eSchristos continue; 37158585484eSchristos } 37168585484eSchristos if ((r = fn(base, ev, arg))) 37178585484eSchristos return r; 37188585484eSchristos } 37198585484eSchristos } 37208585484eSchristos 37218585484eSchristos /* Finally, we deal wit all the active events that we haven't touched 37228585484eSchristos * yet. 
*/ 37238585484eSchristos for (i = 0; i < base->nactivequeues; ++i) { 37248585484eSchristos struct event_callback *evcb; 37258585484eSchristos TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) { 37268585484eSchristos if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) { 37278585484eSchristos /* This isn't an event (evlist_init clear), or 37288585484eSchristos * we already processed it. (inserted or 37298585484eSchristos * timeout set */ 37308585484eSchristos continue; 37318585484eSchristos } 37328585484eSchristos ev = event_callback_to_event(evcb); 37338585484eSchristos if ((r = fn(base, ev, arg))) 37348585484eSchristos return r; 37358585484eSchristos } 37368585484eSchristos } 37378585484eSchristos 37388585484eSchristos return 0; 37398585484eSchristos } 37408585484eSchristos 37418585484eSchristos /* Helper for event_base_dump_events: called on each event in the event base; 37428585484eSchristos * dumps only the inserted events. */ 37438585484eSchristos static int 37448585484eSchristos dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg) 37458585484eSchristos { 37468585484eSchristos FILE *output = arg; 37478585484eSchristos const char *gloss = (e->ev_events & EV_SIGNAL) ? 37488585484eSchristos "sig" : "fd "; 37498585484eSchristos 37508585484eSchristos if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT))) 37518585484eSchristos return 0; 37528585484eSchristos 3753*eabc0478Schristos fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s", 37548585484eSchristos (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), 37558585484eSchristos (e->ev_events&EV_READ)?" Read":"", 37568585484eSchristos (e->ev_events&EV_WRITE)?" Write":"", 3757b8ecfcfeSchristos (e->ev_events&EV_CLOSED)?" EOF":"", 37588585484eSchristos (e->ev_events&EV_SIGNAL)?" Signal":"", 37598585484eSchristos (e->ev_events&EV_PERSIST)?" Persist":"", 3760*eabc0478Schristos (e->ev_events&EV_ET)?" ET":"", 37618585484eSchristos (e->ev_flags&EVLIST_INTERNAL)?" Internal":""); 37628585484eSchristos if (e->ev_flags & EVLIST_TIMEOUT) { 37638585484eSchristos struct timeval tv; 37648585484eSchristos tv.tv_sec = e->ev_timeout.tv_sec; 37658585484eSchristos tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK; 37668585484eSchristos evutil_timeradd(&tv, &base->tv_clock_diff, &tv); 37678585484eSchristos fprintf(output, " Timeout=%ld.%06d", 37688585484eSchristos (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK)); 37698585484eSchristos } 37708585484eSchristos fputc('\n', output); 37718585484eSchristos 37728585484eSchristos return 0; 37738585484eSchristos } 37748585484eSchristos 37758585484eSchristos /* Helper for event_base_dump_events: called on each event in the event base; 37768585484eSchristos * dumps only the active events. */ 37778585484eSchristos static int 37788585484eSchristos dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg) 37798585484eSchristos { 37808585484eSchristos FILE *output = arg; 37818585484eSchristos const char *gloss = (e->ev_events & EV_SIGNAL) ? 37828585484eSchristos "sig" : "fd "; 37838585484eSchristos 37848585484eSchristos if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) 37858585484eSchristos return 0; 37868585484eSchristos 3787b8ecfcfeSchristos fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n", 37888585484eSchristos (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri, 37898585484eSchristos (e->ev_res&EV_READ)?" Read":"", 37908585484eSchristos (e->ev_res&EV_WRITE)?" 
Write":"", 3791b8ecfcfeSchristos (e->ev_res&EV_CLOSED)?" EOF":"", 37928585484eSchristos (e->ev_res&EV_SIGNAL)?" Signal":"", 37938585484eSchristos (e->ev_res&EV_TIMEOUT)?" Timeout":"", 37948585484eSchristos (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"", 37958585484eSchristos (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":""); 37968585484eSchristos 37978585484eSchristos return 0; 37988585484eSchristos } 37998585484eSchristos 38008585484eSchristos int 38018585484eSchristos event_base_foreach_event(struct event_base *base, 38028585484eSchristos event_base_foreach_event_cb fn, void *arg) 38038585484eSchristos { 38048585484eSchristos int r; 38058585484eSchristos if ((!fn) || (!base)) { 38068585484eSchristos return -1; 38078585484eSchristos } 38088585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 38098585484eSchristos r = event_base_foreach_event_nolock_(base, fn, arg); 38108585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 38118585484eSchristos return r; 38128585484eSchristos } 38138585484eSchristos 38148585484eSchristos 38158585484eSchristos void 38168585484eSchristos event_base_dump_events(struct event_base *base, FILE *output) 38178585484eSchristos { 38188585484eSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 38198585484eSchristos fprintf(output, "Inserted events:\n"); 38208585484eSchristos event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output); 38218585484eSchristos 38228585484eSchristos fprintf(output, "Active events:\n"); 38238585484eSchristos event_base_foreach_event_nolock_(base, dump_active_event_fn, output); 38248585484eSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 38258585484eSchristos } 38268585484eSchristos 38278585484eSchristos void 3828b8ecfcfeSchristos event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events) 3829b8ecfcfeSchristos { 3830b8ecfcfeSchristos EVBASE_ACQUIRE_LOCK(base, th_base_lock); 3831*eabc0478Schristos 3832*eabc0478Schristos /* Activate any non timer events */ 3833*eabc0478Schristos if (!(events & EV_TIMEOUT)) { 3834b8ecfcfeSchristos evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED)); 3835*eabc0478Schristos } else { 3836*eabc0478Schristos /* If we want to activate timer events, loop and activate each event with 3837*eabc0478Schristos * the same fd in both the timeheap and common timeouts list */ 3838*eabc0478Schristos int i; 3839*eabc0478Schristos unsigned u; 3840*eabc0478Schristos struct event *ev; 3841*eabc0478Schristos 3842*eabc0478Schristos for (u = 0; u < base->timeheap.n; ++u) { 3843*eabc0478Schristos ev = base->timeheap.p[u]; 3844*eabc0478Schristos if (ev->ev_fd == fd) { 3845*eabc0478Schristos event_active_nolock_(ev, EV_TIMEOUT, 1); 3846*eabc0478Schristos } 3847*eabc0478Schristos } 3848*eabc0478Schristos 3849*eabc0478Schristos for (i = 0; i < base->n_common_timeouts; ++i) { 3850*eabc0478Schristos struct common_timeout_list *ctl = base->common_timeout_queues[i]; 3851*eabc0478Schristos TAILQ_FOREACH(ev, &ctl->events, 3852*eabc0478Schristos ev_timeout_pos.ev_next_with_common_timeout) { 3853*eabc0478Schristos if (ev->ev_fd == fd) { 3854*eabc0478Schristos event_active_nolock_(ev, EV_TIMEOUT, 1); 3855*eabc0478Schristos } 3856*eabc0478Schristos } 3857*eabc0478Schristos } 3858*eabc0478Schristos } 3859*eabc0478Schristos 3860b8ecfcfeSchristos EVBASE_RELEASE_LOCK(base, th_base_lock); 3861b8ecfcfeSchristos } 3862b8ecfcfeSchristos 3863b8ecfcfeSchristos void 3864b8ecfcfeSchristos event_base_active_by_signal(struct event_base *base, int sig) 3865b8ecfcfeSchristos { 3866b8ecfcfeSchristos 
void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}

static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}

static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}

static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}

static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}

void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
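
/*
 * Illustrative sketch (not part of this file): libevent_global_shutdown()
 * above is meant to be the very last libevent call a process makes, after
 * every event and event_base has been freed.  Typical teardown in an
 * application might look like this ("base" being the application's only
 * event base):
 *
 *	event_base_free(base);
 *	libevent_global_shutdown();
 */
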
#ifndef EVENT__DISABLE_THREAD_SUPPORT
int
event_global_setup_locks_(const int enable_locks)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
#endif
	if (evsig_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
		return -1;
	return 0;
}
#endif

void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last = NULL, *ev;

		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout, base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	EVUTIL_ASSERT(count == base->event_count_active);
}
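
/*
 * A small worked example of the heap property verified above, purely for
 * illustration: with the parent index computed as (i - 1) / 2, entry 5 of
 * timeheap.p is compared against entry 2, and entry 2 against entry 0, so
 * every event's timeout is >= its parent's and the earliest timeout always
 * sits at index 0.
 *
 *	index i : 0   1   2   3   4   5
 *	parent  : -   0   0   1   1   2
 */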