xref: /netbsd-src/external/bsd/libevent/dist/event-internal.h (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: event-internal.h,v 1.3 2017/01/31 23:17:39 christos Exp $	*/
2 /*
3  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
4  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 #ifndef EVENT_INTERNAL_H_INCLUDED_
29 #define EVENT_INTERNAL_H_INCLUDED_
30 
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34 
35 #include "event2/event-config.h"
36 #include "evconfig-private.h"
37 
38 #include <time.h>
39 #include <sys/queue.h>
40 #include "event2/event_struct.h"
41 #include "minheap-internal.h"
42 #include "evsignal-internal.h"
43 #include "mm-internal.h"
44 #include "defer-internal.h"
45 
46 /* Aliases that map the members of struct event's ev_ union back to short names */
47 
48 /* mutually exclusive: an event is used either for a signal or for I/O */
49 #define ev_signal_next	ev_.ev_signal.ev_signal_next
50 #define ev_io_next	ev_.ev_io.ev_io_next
51 #define ev_io_timeout	ev_.ev_io.ev_timeout
52 
53 /* used only by signal events */
54 #define ev_ncalls	ev_.ev_signal.ev_ncalls
55 #define ev_pncalls	ev_.ev_signal.ev_pncalls
56 
57 #define ev_pri ev_evcallback.evcb_pri	/* aliases into the embedded event_callback */
58 #define ev_flags ev_evcallback.evcb_flags
59 #define ev_closure ev_evcallback.evcb_closure
60 #define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
61 #define ev_arg ev_evcallback.evcb_arg
62 
63 /** @name Event closure codes
64 
65     Possible values for evcb_closure in struct event_callback
66 
67     @{
68  */
69 /** A regular event. Uses the evcb_callback callback */
70 #define EV_CLOSURE_EVENT 0
71 /** A signal event. Uses the evcb_callback callback */
72 #define EV_CLOSURE_EVENT_SIGNAL 1
73 /** A persistent non-signal event. Uses the evcb_callback callback */
74 #define EV_CLOSURE_EVENT_PERSIST 2
75 /** A simple callback. Uses the evcb_selfcb callback. */
76 #define EV_CLOSURE_CB_SELF 3
77 /** A finalizing callback. Uses the evcb_cbfinalize callback. */
78 #define EV_CLOSURE_CB_FINALIZE 4
79 /** A finalizing event. Uses the evcb_evfinalize callback. */
80 #define EV_CLOSURE_EVENT_FINALIZE 5
81 /** A finalizing event that should get freed after. Uses the evcb_evfinalize
82  * callback. */
83 #define EV_CLOSURE_EVENT_FINALIZE_FREE 6
84 /** @} */
85 
86 /** Structure to define the backend of a given event_base. */
87 struct eventop {
88 	/** The name of this backend. */
89 	const char *name;
90 	/** Function to set up an event_base to use this backend.  It should
91 	 * create a new structure holding whatever information is needed to
92 	 * run the backend, and return it.  The returned pointer will get
93 	 * stored by event_init into the event_base.evbase field.  On failure,
94 	 * this function should return NULL. */
95 	void *(*init)(struct event_base *);
96 	/** Enable reading/writing on a given fd or signal.  'events' will be
97 	 * the events that we're trying to enable: one or more of EV_READ,
98 	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
99 	 * were enabled on this fd previously.  'fdinfo' will be a structure
100 	 * associated with the fd by the evmap; its size is defined by the
101 	 * fdinfo_len field below.  It will be set to 0 the first time the fd
102 	 * is added.  The function should return 0 on success and -1 on error.
103 	 */
104 	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
105 	/** As "add", except 'events' contains the events we mean to disable. */
106 	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
107 	/** Function to implement the core of an event loop.  It must see which
108 	    added events are ready, and cause event_active to be called for each
109 	    active event (usually via event_io_active or such).  It should
110 	    return 0 on success and -1 on error.
111 	 */
112 	int (*dispatch)(struct event_base *, struct timeval *);
113 	/** Function to clean up and free our data from the event_base. */
114 	void (*dealloc)(struct event_base *);
115 	/** Flag: set if we need to reinitialize the event base after we fork.
116 	 */
117 	int need_reinit;
118 	/** Bit-array of supported event_method_features that this backend can
119 	 * provide. */
120 	enum event_method_feature features;
121 	/** Length of the extra information we should record for each fd that
122 	    has one or more active events.  This information is recorded
123 	    as part of the evmap entry for each fd, and passed as an argument
124 	    to the add and del functions above.
125 	 */
126 	size_t fdinfo_len;
127 };
128 
129 #ifdef _WIN32
130 /* If we're on win32, then file descriptors are not nice low densely packed
131    integers.  Instead, they are pointer-like windows handles, and we want to
132    use a hashtable instead of an array to map fds to events.
133 */
134 #define EVMAP_USE_HT
135 #endif
136 
137 /* #define HT_CACHE_HASH_VALS */
138 
139 #ifdef EVMAP_USE_HT
140 #define HT_NO_CACHE_HASH_VALUES	/* don't keep a cached copy of each entry's hash */
141 #include "ht-internal.h"
142 struct event_map_entry;
143 HT_HEAD(event_io_map, event_map_entry);	/* hashtable mapping fds to events */
144 #else
145 /* without the hashtable, the fd map reuses the array-based signal map */
146 #define event_io_map event_signal_map
147 #endif
147 
148 /* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
149    defined, this structure is also used as event_io_map, which maps fds to a
150    list of events.
151 */
152 struct event_signal_map {
153 	/* An array of evmap_io * or of evmap_signal *; empty entries are
154 	 * set to NULL. */
155 	void **entries;
156 	/* The number of slots available in the 'entries' array */
157 	int nentries;
158 };
159 
160 /* A list of events waiting on a given 'common' timeout value.  Ordinarily,
161  * events waiting for a timeout wait on a minheap.  Sometimes, however, a
162  * queue can be faster.
163  */
164 struct common_timeout_list {
165 	/* List of events currently waiting in the queue. */
166 	struct event_list events;
167 	/* 'magic' timeval used to indicate the duration of events in this
168 	 * queue. */
169 	struct timeval duration;
170 	/* Event that triggers whenever one of the events in the queue is
171 	 * ready to activate */
172 	struct event timeout_event;
173 	/* The event_base that this timeout list is part of */
174 	struct event_base *base;
175 };
176 
177 /** Mask used to get the real tv_usec value from a common timeout. */
178 #define COMMON_TIMEOUT_MICROSECONDS_MASK       0x000fffff
179 
180 struct event_change;
181 
182 /* List of 'changes' since the last call to eventop.dispatch.  Only maintained
183  * if the backend is using changesets. */
184 struct event_changelist {
185 	struct event_change *changes;	/* array of pending changes */
186 	int n_changes;			/* number of changes stored in 'changes' */
187 	int changes_size;		/* allocated capacity of 'changes' */
188 };
189 
190 #ifndef EVENT__DISABLE_DEBUG_MODE
191 /* Global internal flag: set to one if debug mode is on. */
192 extern int event_debug_mode_on_;
193 #define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
194 #else
195 /* debug mode compiled out: the check is a constant false */
196 #define EVENT_DEBUG_MODE_IS_ON() (0)
197 #endif
198 
199 TAILQ_HEAD(evcallback_list, event_callback);	/* a queue of event_callbacks */
199 
200 /* State for an event that is set up to be processed a single time */
201 struct event_once {
202 	LIST_ENTRY(event_once) next_once;	/* link in event_base.once_events */
203 	struct event ev;			/* the underlying event */
204 
205 	void (*cb)(evutil_socket_t, short, void *);	/* user callback */
206 	void *arg;				/* argument passed to cb */
207 };
208 
209 struct event_base {
210 	/** Function pointers and other data to describe this event_base's
211 	 * backend. */
212 	const struct eventop *evsel;
213 	/** Pointer to backend-specific data. */
214 	void *evbase;
215 
216 	/** List of changes to tell backend about at next dispatch.  Only used
217 	 * by the O(1) backends. */
218 	struct event_changelist changelist;
219 
220 	/** Function pointers used to describe the backend that this event_base
221 	 * uses for signals */
222 	const struct eventop *evsigsel;
223 	/** Data to implement the common signal handler code. */
224 	struct evsig_info sig;
225 
226 	/** Number of virtual events */
227 	int virtual_event_count;
228 	/** Maximum number of virtual events active */
229 	int virtual_event_count_max;
230 	/** Number of total events added to this event_base */
231 	int event_count;
232 	/** Maximum number of total events added to this event_base */
233 	int event_count_max;
234 	/** Number of total events active in this event_base */
235 	int event_count_active;
236 	/** Maximum number of total events active in this event_base */
237 	int event_count_active_max;
238 
239 	/** Set if we should terminate the loop once we're done processing
240 	 * events. */
241 	int event_gotterm;
242 	/** Set if we should terminate the loop immediately */
243 	int event_break;
244 	/** Set if we should start a new instance of the loop immediately. */
245 	int event_continue;
246 
247 	/** The currently running priority of events */
248 	int event_running_priority;
249 
250 	/** Set if we're running the event_base_loop function, to prevent
251 	 * reentrant invocation. */
252 	int running_loop;
253 
254 	/** Set to the number of deferred_cbs we've made 'active' in the
255 	 * loop.  This is a hack to prevent starvation; it would be smarter
256 	 * to just use event_config_set_max_dispatch_interval's max_callbacks
257 	 * feature */
258 	int n_deferreds_queued;
259 
260 	/* Active event management. */
261 	/** An array of nactivequeues queues for active event_callbacks (ones
262 	 * that have triggered, and whose callbacks need to be called).  Low
263 	 * priority numbers are more important, and stall higher ones.
264 	 */
265 	struct evcallback_list *activequeues;
266 	/** The length of the activequeues array */
267 	int nactivequeues;
268 	/** A list of event_callbacks that should become active the next time
269 	 * we process events, but not this time. */
270 	struct evcallback_list active_later_queue;
271 
272 	/* common timeout logic */
273 
274 	/** An array of common_timeout_list* for all of the common timeout
275 	 * values we know. */
276 	struct common_timeout_list **common_timeout_queues;
277 	/** The number of entries used in common_timeout_queues */
278 	int n_common_timeouts;
279 	/** The total size of common_timeout_queues. */
280 	int n_common_timeouts_allocated;
281 
282 	/** Mapping from file descriptors to enabled (added) events */
283 	struct event_io_map io;
284 
285 	/** Mapping from signal numbers to enabled (added) events. */
286 	struct event_signal_map sigmap;
287 
288 	/** Priority queue of events with timeouts. */
289 	struct min_heap timeheap;
290 
291 	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
292 	 * too often. */
293 	struct timeval tv_cache;
294 
295 	/** Monotonic time source for this base. */
296 	struct evutil_monotonic_timer monotonic_timer;
297 
298 	/** Difference between internal time (maybe from clock_gettime) and
299 	 * gettimeofday. */
300 	struct timeval tv_clock_diff;
301 	/** Second in which we last updated tv_clock_diff, in monotonic time. */
302 	time_t last_updated_clock_diff;
303 
304 #ifndef EVENT__DISABLE_THREAD_SUPPORT
305 	/* threading support */
306 	/** The thread currently running the event_loop for this base */
307 	unsigned long th_owner_id;
308 	/** A lock to prevent conflicting accesses to this event_base */
309 	void *th_base_lock;
310 	/** A condition that gets signalled when we're done processing an
311 	 * event with waiters on it. */
312 	void *current_event_cond;
313 	/** Number of threads blocking on current_event_cond. */
314 	int current_event_waiters;
315 #endif
316 	/** The event whose callback is executing right now */
317 	struct event_callback *current_event;
318 
319 #ifdef _WIN32
320 	/** IOCP support structure, if IOCP is enabled. */
321 	struct event_iocp_port *iocp;
322 #endif
323 
324 	/** Flags that this base was configured with */
325 	enum event_base_config_flag flags;
326 
327 	/* dispatch limits (cf. event_config_set_max_dispatch_interval) */
328 	struct timeval max_dispatch_time;	/* max time per dispatch pass */
329 	int max_dispatch_callbacks;		/* max callbacks per dispatch pass */
330 	int limit_callbacks_after_prio;		/* apply limits only from this priority on */
331 
332 	/* Notification used to wake up the main thread (e.g., to break out of
333 	 * its loop). */
334 	/** True if the base already has a pending notify, and we don't need
335 	 * to add any more. */
336 	int is_notify_pending;
337 	/** A socketpair used by some th_notify functions to wake up the main
338 	 * thread. */
339 	evutil_socket_t th_notify_fd[2];
340 	/** An event used by some th_notify functions to wake up the main
341 	 * thread. */
342 	struct event th_notify;
343 	/** A function used to wake up the main thread from another thread. */
344 	int (*th_notify_fn)(struct event_base *base);
345 
346 	/** Saved seed for weak random number generator. Some backends use
347 	 * this to produce fairness among sockets. Protected by th_base_lock. */
348 	struct evutil_weakrand_state weakrand_seed;
349 
350 	/** List of event_onces that have not yet fired. */
351 	LIST_HEAD(once_event_list, event_once) once_events;
352 
353 };
351 
352 struct event_config_entry {
353 	TAILQ_ENTRY(event_config_entry) next;	/* link in event_config.entries */
354 
355 	const char *avoid_method;	/* name of a backend method to avoid */
356 };
357 
358 /** Internal structure: describes the configuration we want for an event_base
359  * that we're about to allocate. */
360 struct event_config {
361 	TAILQ_HEAD(event_configq, event_config_entry) entries;	/* backends to avoid */
362 
363 	int n_cpus_hint;	/* hint about the number of CPUs to plan for */
364 	/* dispatch limits; copied into the matching event_base fields */
365 	struct timeval max_dispatch_interval;
366 	int max_dispatch_callbacks;
367 	int limit_callbacks_after_prio;
368 	enum event_method_feature require_features;	/* features the backend must provide */
369 	enum event_base_config_flag flags;	/* flags to apply to the new base */
370 };
370 
371 /* Internal use only: Fallback definitions for TAILQ macros that might be
372    missing from <sys/queue.h> */
373 #ifndef TAILQ_FIRST
374 #define	TAILQ_FIRST(head)		((head)->tqh_first)
375 #endif
376 #ifndef TAILQ_END
377 #define	TAILQ_END(head)			NULL
378 #endif
379 #ifndef TAILQ_NEXT
380 #define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
381 #endif
382 
383 #ifndef TAILQ_FOREACH
384 #define TAILQ_FOREACH(var, head, field)					\
385 	for ((var) = TAILQ_FIRST(head);					\
386 	     (var) != TAILQ_END(head);					\
387 	     (var) = TAILQ_NEXT(var, field))
388 #endif
389 
390 #ifndef TAILQ_INSERT_BEFORE
391 #define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
392 	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
393 	(elm)->field.tqe_next = (listelm);				\
394 	*(listelm)->field.tqe_prev = (elm);				\
395 	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
396 } while (/*CONSTCOND*/0)
397 #endif
397 
398 #define N_ACTIVE_CALLBACKS(base)					\
399 	((base)->event_count_active)
400 
401 int evsig_set_handler_(struct event_base *base, int evsignal,
402 			  void (*fn)(int));
403 int evsig_restore_handler_(struct event_base *base, int evsignal);
404 
405 int event_add_nolock_(struct event *ev,
406     const struct timeval *tv, int tv_is_absolute);
407 /** Argument for event_del_nolock_. Tells event_del not to block on the event
408  * if it's running in another thread. */
409 #define EVENT_DEL_NOBLOCK 0
410 /** Argument for event_del_nolock_. Tells event_del to block on the event
411  * if it's running in another thread, regardless of its value for EV_FINALIZE
412  */
413 #define EVENT_DEL_BLOCK 1
414 /** Argument for event_del_nolock_. Tells event_del to block on the event
415  * if it is running in another thread and it doesn't have EV_FINALIZE set.
416  */
417 #define EVENT_DEL_AUTOBLOCK 2
418 /** Argument for event_del_nolock_. Tells event_del to proceed even if the
419  * event is set up for finalization rather than for regular use. */
420 #define EVENT_DEL_EVEN_IF_FINALIZING 3
421 int event_del_nolock_(struct event *ev, int blocking);
422 int event_remove_timer_nolock_(struct event *ev);
423 
424 void event_active_nolock_(struct event *ev, int res, short count);
425 int event_callback_activate_(struct event_base *, struct event_callback *);
426 int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
427 int event_callback_cancel_(struct event_base *base,
428     struct event_callback *evcb);
429 
430 void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
431 void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
432 int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));
433 
434 
435 void event_active_later_(struct event *ev, int res);
436 void event_active_later_nolock_(struct event *ev, int res);
437 int event_callback_activate_later_nolock_(struct event_base *base,
438     struct event_callback *evcb);
439 int event_callback_cancel_nolock_(struct event_base *base,
440     struct event_callback *evcb, int even_if_finalizing);
441 void event_callback_init_(struct event_base *base,
442     struct event_callback *cb);
443 
444 /* FIXME document. */
445 void event_base_add_virtual_(struct event_base *base);
446 void event_base_del_virtual_(struct event_base *base);
447 
448 /** For debugging: unless assertions are disabled, verify the referential
449     integrity of the internal data structures of 'base'.  This operation can
450     be expensive.
451 
452     Returns on success; aborts on failure.
453 */
454 void event_base_assert_ok_(struct event_base *base);
455 void event_base_assert_ok_nolock_(struct event_base *base);
456 
457 
458 /* Helper function: Call 'cb' exactly once for every inserted or active event
459  * in the event_base 'base'.
460  *
461  * If cb returns 0, continue on to the next event. Otherwise, return the same
462  * value that cb returned.
463  *
464  * Requires that 'base' be locked.
465  */
466 int event_base_foreach_event_nolock_(struct event_base *base,
467     event_base_foreach_event_cb cb, void *arg);
468 
469 /* Cleanup function to reset debug mode during shutdown.
470  *
471  * Calling this function doesn't mean it'll be possible to re-enable
472  * debug mode if any events were added.
473  */
474 void event_disable_debug_mode(void);
475 
476 #ifdef __cplusplus
477 }
478 #endif
479 
480 #endif /* EVENT_INTERNAL_H_INCLUDED_ */
481