/*	$NetBSD: event-internal.h,v 1.6 2024/08/18 20:47:21 christos Exp $	*/

/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVENT_INTERNAL_H_INCLUDED_
#define EVENT_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"

/* map union members back */

/* mutually exclusive */
#define ev_signal_next	ev_.ev_signal.ev_signal_next
#define ev_io_next	ev_.ev_io.ev_io_next
#define ev_io_timeout	ev_.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	ev_.ev_signal.ev_ncalls
#define ev_pncalls	ev_.ev_signal.ev_pncalls

#define ev_pri ev_evcallback.evcb_pri
#define ev_flags ev_evcallback.evcb_flags
#define ev_closure ev_evcallback.evcb_closure
#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
#define ev_arg ev_evcallback.evcb_arg

/** @name Event closure codes

    Possible values for evcb_closure in struct event_callback

    @{
 */
/** A regular event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT 0
/** A signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_SIGNAL 1
/** A persistent non-signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_PERSIST 2
/** A simple callback. Uses the evcb_selfcb callback. */
#define EV_CLOSURE_CB_SELF 3
/** A finalizing callback. Uses the evcb_cbfinalize callback. */
#define EV_CLOSURE_CB_FINALIZE 4
/** A finalizing event. Uses the evcb_evfinalize callback. */
#define EV_CLOSURE_EVENT_FINALIZE 5
/** A finalizing event that should get freed after. Uses the evcb_evfinalize
 * callback. */
#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
/** @} */
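
/*
 * Illustrative sketch (not part of this header): the event loop is expected
 * to pick the callback-union member to invoke based on evcb_closure, roughly
 * along these lines.  The real dispatch lives in event.c; the names below
 * only show how the closure codes above map onto the evcb_cb_union members
 * documented with them.
 *
 *	switch (evcb->evcb_closure) {
 *	case EV_CLOSURE_CB_SELF:
 *		evcb->evcb_cb_union.evcb_selfcb(evcb, evcb->evcb_arg);
 *		break;
 *	case EV_CLOSURE_EVENT:
 *	case EV_CLOSURE_EVENT_SIGNAL:
 *	case EV_CLOSURE_EVENT_PERSIST:
 *		evcb->evcb_cb_union.evcb_callback(fd, res, evcb->evcb_arg);
 *		break;
 *	...handle the finalize closures via evcb_cbfinalize/evcb_evfinalize...
 *	}
 */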

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL.
	 */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo_len field below.  It will be set to 0 the first time the
	 * fd is added.  The function should return 0 on success and -1 on
	 * error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).  It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events.  This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
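
/*
 * Illustrative sketch (hypothetical; not a backend shipped in this tree):
 * a backend advertises itself by filling in a struct eventop, along these
 * lines, where the example_poll_* functions are assumed to exist elsewhere:
 *
 *	static const struct eventop examplepollops = {
 *		"example-poll",
 *		example_poll_init,
 *		example_poll_add,
 *		example_poll_del,
 *		example_poll_dispatch,
 *		example_poll_dealloc,
 *		1,			-- need_reinit: re-init after fork
 *		EV_FEATURE_FDS,		-- features this backend provides
 *		0			-- fdinfo_len: no per-fd extra data
 *	};
 */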

#ifdef _WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
   integers.  Instead, they are pointer-like windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif

/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};

/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK       0x000fffff
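
/*
 * Sketch of the encoding assumed here (the authoritative constants live in
 * event.c, not in this header): a "common timeout" timeval keeps the real
 * microsecond count in the low bits of tv_usec, while the upper bits carry
 * a magic marker and the index of the owning common_timeout_list.
 * Recovering the true microseconds therefore looks roughly like:
 *
 *	usec = tv->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK;
 */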

struct event_change;

/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};

#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int event_debug_mode_on_;
#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif

TAILQ_HEAD(evcallback_list, event_callback);

/* Sets up an event for processing once */
struct event_once {
	LIST_ENTRY(event_once) next_once;
	struct event ev;

	void (*cb)(evutil_socket_t, short, void *);
	void *arg;
};

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Maximum number of virtual events active */
	int virtual_event_count_max;
	/** Number of total events added to this event_base */
	int event_count;
	/** Maximum number of total events added to this event_base */
	int event_count_max;
	/** Number of total events active in this event_base */
	int event_count_active;
	/** Maximum number of total events active in this event_base */
	int event_count_active_max;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/** Set to the number of deferred_cbs we've made 'active' in the
	 * loop.  This is a hack to prevent starvation; it would be smarter
	 * to just use event_config_set_max_dispatch_interval's max_callbacks
	 * feature */
	int n_deferreds_queued;

	/* Active event management. */
	/** An array of nactivequeues queues for active event_callbacks (ones
	 * that have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct evcallback_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;
	/** A list of event_callbacks that should become active the next time
	 * we process events, but not this time. */
	struct evcallback_list active_later_queue;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

	struct evutil_monotonic_timer monotonic_timer;

	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif
	/** The event whose callback is executing right now */
	struct event_callback *current_event;

#ifdef _WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	struct timeval max_dispatch_time;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;

	/* Notify main thread to wake up, break, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);
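
	/* Note (descriptive, based on how the th_notify machinery is used
	 * elsewhere in libevent; details may differ per platform): the
	 * wake-up function typically just makes th_notify_fd readable --
	 * e.g. by writing a byte to th_notify_fd[1] -- so that the
	 * th_notify event fires and the loop re-examines its state. */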

	/** Saved seed for weak random number generator. Some backends use
	 * this to produce fairness among sockets. Protected by th_base_lock. */
	struct evutil_weakrand_state weakrand_seed;

	/** List of event_onces that have not yet fired. */
	LIST_HEAD(once_event_list, event_once) once_events;

};

struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	int n_cpus_hint;
	struct timeval max_dispatch_interval;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;
	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};

/* Internal use only: Macros that might be missing from <sys/queue.h> */
#ifndef LIST_END
#define LIST_END(head)			NULL
#endif

#ifndef TAILQ_FIRST
#define TAILQ_FIRST(head)		((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define TAILQ_END(head)			NULL
#endif
#ifndef TAILQ_NEXT
#define TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST(head);					\
	     (var) != TAILQ_END(head);					\
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (0)
#endif
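
/*
 * Usage sketch for the compatibility macros above (illustrative only; the
 * local variable names are made up): walking an evcallback_list looks like
 *
 *	struct event_callback *evcb;
 *	TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
 *		... inspect evcb ...
 *	}
 *
 * where evcb_active_next is the TAILQ_ENTRY field inside
 * struct event_callback.
 */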

#define N_ACTIVE_CALLBACKS(base)					\
	((base)->event_count_active)

int evsig_set_handler_(struct event_base *base, int evsignal,
			  void (*fn)(int));
int evsig_restore_handler_(struct event_base *base, int evsignal);

int event_add_nolock_(struct event *ev,
    const struct timeval *tv, int tv_is_absolute);
/** Argument for event_del_nolock_. Tells event_del not to block on the event
 * if it's running in another thread. */
#define EVENT_DEL_NOBLOCK 0
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it's running in another thread, regardless of its value for EV_FINALIZE.
 */
#define EVENT_DEL_BLOCK 1
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it is running in another thread and it doesn't have EV_FINALIZE set.
 */
#define EVENT_DEL_AUTOBLOCK 2
/** Argument for event_del_nolock_. Tells event_del to proceed even if the
 * event is set up for finalization rather than for regular use. */
#define EVENT_DEL_EVEN_IF_FINALIZING 3
int event_del_nolock_(struct event *ev, int blocking);
int event_remove_timer_nolock_(struct event *ev);

void event_active_nolock_(struct event *ev, int res, short count);
EVENT2_EXPORT_SYMBOL
int event_callback_activate_(struct event_base *, struct event_callback *);
int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
int event_callback_cancel_(struct event_base *base,
    struct event_callback *evcb);

void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
EVENT2_EXPORT_SYMBOL
void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));


EVENT2_EXPORT_SYMBOL
void event_active_later_(struct event *ev, int res);
void event_active_later_nolock_(struct event *ev, int res);
int event_callback_activate_later_nolock_(struct event_base *base,
    struct event_callback *evcb);
int event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing);
void event_callback_init_(struct event_base *base,
    struct event_callback *cb);

/* FIXME document. */
EVENT2_EXPORT_SYMBOL
void event_base_add_virtual_(struct event_base *base);
void event_base_del_virtual_(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'.  This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
EVENT2_EXPORT_SYMBOL
void event_base_assert_ok_(struct event_base *base);
void event_base_assert_ok_nolock_(struct event_base *base);
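
/*
 * Convention note (a sketch, assuming the locking macros from
 * evthread-internal.h): the *_nolock_ functions above expect the caller to
 * already hold the base lock, e.g.
 *
 *	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 *	event_del_nolock_(ev, EVENT_DEL_AUTOBLOCK);
 *	EVBASE_RELEASE_LOCK(base, th_base_lock);
 *
 * The corresponding functions without the "_nolock_" suffix take and
 * release the lock themselves.
 */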

/* Helper function: Call 'fn' exactly once for every inserted or active event
 * in the event_base 'base'.
 *
 * If fn returns 0, continue on to the next event. Otherwise, return the same
 * value that fn returned.
 *
 * Requires that 'base' be locked.
 */
int event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb cb, void *arg);

/* Cleanup function to reset debug mode during shutdown.
 *
 * Calling this function doesn't mean it'll be possible to re-enable
 * debug mode if any events were added.
 */
void event_disable_debug_mode(void);

#ifdef __cplusplus
}
#endif

#endif /* EVENT_INTERNAL_H_INCLUDED_ */