xref: /netbsd-src/external/bsd/ntp/dist/sntp/libevent/event-internal.h (revision 413d532bcc3f62d122e56d92e13ac64825a40baf)
1 /*	$NetBSD: event-internal.h,v 1.1.1.1 2013/12/27 23:31:16 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
5  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #ifndef EVENT_INTERNAL_H_INCLUDED_
30 #define EVENT_INTERNAL_H_INCLUDED_
31 
32 #ifdef __cplusplus
33 extern "C" {
34 #endif
35 
36 #include "event2/event-config.h"
37 #include "evconfig-private.h"
38 
39 #include <time.h>
40 #include <sys/queue.h>
41 #include "event2/event_struct.h"
42 #include "minheap-internal.h"
43 #include "evsignal-internal.h"
44 #include "mm-internal.h"
45 #include "defer-internal.h"
46 
47 /* Map members of the ev_ union inside struct event (see event2/event_struct.h) back to flat names. */
48 
49 /* mutually exclusive: an event uses either its signal member or its io member */
50 #define ev_signal_next	ev_.ev_signal.ev_signal_next
51 #define ev_io_next	ev_.ev_io.ev_io_next
52 #define ev_io_timeout	ev_.ev_io.ev_timeout
53 
54 /* used only by signals */
55 #define ev_ncalls	ev_.ev_signal.ev_ncalls
56 #define ev_pncalls	ev_.ev_signal.ev_pncalls
57 
58 /* Shorthand for the embedded event_callback's fields. */
59 #define ev_pri ev_evcallback.evcb_pri
60 #define ev_flags ev_evcallback.evcb_flags
61 #define ev_closure ev_evcallback.evcb_closure
62 #define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
63 #define ev_arg ev_evcallback.evcb_arg
64 
65 /* Possible values for evcb_closure in struct event_callback */
66 #define EV_CLOSURE_EVENT 0
67 #define EV_CLOSURE_EVENT_SIGNAL 1
68 #define EV_CLOSURE_EVENT_PERSIST 2
69 #define EV_CLOSURE_CB_SELF 3
69 
70 /** Structure to define the backend of a given event_base. */
71 struct eventop {
72 	/** The name of this backend. */
73 	const char *name;
74 	/** Function to set up an event_base to use this backend.  It should
75 	 * create a new structure holding whatever information is needed to
76 	 * run the backend, and return it.  The returned pointer will get
77 	 * stored by event_init into the event_base.evbase field.  On failure,
78 	 * this function should return NULL. */
79 	void *(*init)(struct event_base *);
80 	/** Enable reading/writing on a given fd or signal.  'events' will be
81 	 * the events that we're trying to enable: one or more of EV_READ,
82 	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
83 	 * were enabled on this fd previously.  'fdinfo' will be a structure
84 	 * associated with the fd by the evmap; its size is defined by the
85 	 * fdinfo_len field below.  It will be set to 0 the first time the fd is
86 	 * added.  The function should return 0 on success and -1 on error.
87 	 */
88 	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
89 	/** As "add", except 'events' contains the events we mean to disable. */
90 	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
91 	/** Function to implement the core of an event loop.  It must see which
92 	    added events are ready, and cause event_active to be called for each
93 	    active event (usually via event_io_active or such).  It should
94 	    return 0 on success and -1 on error.
95 	 */
96 	int (*dispatch)(struct event_base *, struct timeval *);
97 	/** Function to clean up and free our data from the event_base. */
98 	void (*dealloc)(struct event_base *);
99 	/** Flag: set if we need to reinitialize the event base after we fork.
100 	 */
101 	int need_reinit;
102 	/** Bit-array of supported event_method_features that this backend can
103 	 * provide. */
104 	enum event_method_feature features;
105 	/** Length of the extra information we should record for each fd that
106 	    has one or more active events.  This information is recorded
107 	    as part of the evmap entry for each fd, and passed as an argument
108 	    to the add and del functions above.
109 	 */
110 	size_t fdinfo_len;
111 };
112 
113 #ifdef _WIN32
114 /* If we're on win32, then file descriptors are not nice low densely packed
115    integers.  Instead, they are pointer-like Windows handles, and we want to
116    use a hashtable instead of an array to map fds to events.
117 */
118 #define EVMAP_USE_HT
119 #endif
120 
121 /* #define HT_CACHE_HASH_VALS */
122 
123 #ifdef EVMAP_USE_HT
124 #define HT_NO_CACHE_HASH_VALUES	/* opposite of HT_CACHE_HASH_VALS toggle above */
125 #include "ht-internal.h"
126 struct event_map_entry;
127 HT_HEAD(event_io_map, event_map_entry);
128 #else
129 #define event_io_map event_signal_map	/* fds are small ints: reuse the array-based map below */
130 #endif
131 
132 /* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
133    defined, this structure is also used as event_io_map, which maps fds to a
134    list of events.
135 */
136 struct event_signal_map {
137 	/* An array of evmap_io * or of evmap_signal *; empty entries are
138 	 * set to NULL. */
139 	void **entries;
140 	/* The number of slots allocated in 'entries' (unused slots are NULL) */
141 	int nentries;
142 };
143 
144 /* A list of events waiting on a given 'common' timeout value.  Ordinarily,
145  * events waiting for a timeout wait on a minheap.  Sometimes, however, a
146  * queue can be faster.
147  */
148 struct common_timeout_list {
149 	/* List of events currently waiting in the queue. */
150 	struct event_list events;
151 	/* 'magic' timeval used to indicate the duration of events in this
152 	 * queue. */
153 	struct timeval duration;
154 	/* Event that triggers whenever one of the events in the queue is
155 	 * ready to activate */
156 	struct event timeout_event;
157 	/* The event_base that this timeout list is part of */
158 	struct event_base *base;
159 };
160 
161 /** Mask used to get the real tv_usec value from a common timeout. */
162 #define COMMON_TIMEOUT_MICROSECONDS_MASK       0x000fffff
163 
164 struct event_change;	/* forward declaration; full definition lives elsewhere */
165 
166 /* List of 'changes' since the last call to eventop.dispatch.  Only maintained
167  * if the backend is using changesets. */
168 struct event_changelist {
169 	struct event_change *changes;	/* array of pending changes */
170 	int n_changes;			/* number of entries currently used */
171 	int changes_size;		/* allocated capacity of 'changes' */
172 };
173 
174 #ifndef EVENT__DISABLE_DEBUG_MODE
175 /* Global internal flag: set to one if debug mode is on. */
176 extern int event_debug_mode_on_;
177 #define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
178 #else
179 #define EVENT_DEBUG_MODE_IS_ON() (0)	/* debug mode compiled out: always reports off */
180 #endif
181 
182 TAILQ_HEAD(evcallback_list, event_callback);	/* a tail queue of struct event_callback */
183 
184 /* Sets up an event for processing once */
185 struct event_once {
186 	LIST_ENTRY(event_once) next_once;
187 	struct event ev;
188 
189 	void (*cb)(evutil_socket_t, short, void *);
190 	void *arg;
191 };
192 
193 struct event_base {
194 	/** Function pointers and other data to describe this event_base's
195 	 * backend. */
196 	const struct eventop *evsel;
197 	/** Pointer to backend-specific data. */
198 	void *evbase;
199 
200 	/** List of changes to tell backend about at next dispatch.  Only used
201 	 * by the O(1) backends. */
202 	struct event_changelist changelist;
203 
204 	/** Function pointers used to describe the backend that this event_base
205 	 * uses for signals */
206 	const struct eventop *evsigsel;
207 	/** Data to implement the common signal handler code. */
208 	struct evsig_info sig;
209 
210 	/** Number of virtual events */
211 	int virtual_event_count;
212 	/** Number of total events added to this event_base */
213 	int event_count;
214 	/** Number of total events active in this event_base */
215 	int event_count_active;
216 
217 	/** Set if we should terminate the loop once we're done processing
218 	 * events. */
219 	int event_gotterm;
220 	/** Set if we should terminate the loop immediately */
221 	int event_break;
222 	/** Set if we should start a new instance of the loop immediately. */
223 	int event_continue;
224 
225 	/** The currently running priority of events */
226 	int event_running_priority;
227 
228 	/** Set if we're running the event_base_loop function, to prevent
229 	 * reentrant invocation. */
230 	int running_loop;
231 
232 	/** Set to the number of deferred_cbs we've made 'active' in the
233 	 * loop.  This is a hack to prevent starvation; it would be smarter
234 	 * to just use event_config_set_max_dispatch_interval's max_callbacks
235 	 * feature */
236 	int n_deferreds_queued;
237 
238 	/* Active event management. */
239 	/** An array of nactivequeues queues for active event_callbacks (ones
240 	 * that have triggered, and whose callbacks need to be called).  Low
241 	 * priority numbers are more important, and stall higher ones.
242 	 */
243 	struct evcallback_list *activequeues;
244 	/** The length of the activequeues array */
245 	int nactivequeues;
246 	/** A list of event_callbacks that should become active the next time
247 	 * we process events, but not this time. */
248 	struct evcallback_list active_later_queue;
249 
250 	/* common timeout logic */
251 
252 	/** An array of common_timeout_list* for all of the common timeout
253 	 * values we know. */
254 	struct common_timeout_list **common_timeout_queues;
255 	/** The number of entries used in common_timeout_queues */
256 	int n_common_timeouts;
257 	/** The total size of common_timeout_queues. */
258 	int n_common_timeouts_allocated;
259 
260 	/** Mapping from file descriptors to enabled (added) events */
261 	struct event_io_map io;
262 
263 	/** Mapping from signal numbers to enabled (added) events. */
264 	struct event_signal_map sigmap;
265 
266 	/** Priority queue of events with timeouts. */
267 	struct min_heap timeheap;
268 
269 	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
270 	 * too often. */
271 	struct timeval tv_cache;
272 
	/** Monotonic clock state used for internal timekeeping. */
273 	struct evutil_monotonic_timer monotonic_timer;
274 
275 	/** Difference between internal time (maybe from clock_gettime) and
276 	 * gettimeofday. */
277 	struct timeval tv_clock_diff;
278 	/** Second in which we last updated tv_clock_diff, in monotonic time. */
279 	time_t last_updated_clock_diff;
280 
281 #ifndef EVENT__DISABLE_THREAD_SUPPORT
282 	/* threading support */
283 	/** The thread currently running the event_loop for this base */
284 	unsigned long th_owner_id;
285 	/** A lock to prevent conflicting accesses to this event_base */
286 	void *th_base_lock;
287 	/** A condition that gets signalled when we're done processing an
288 	 * event with waiters on it. */
289 	void *current_event_cond;
290 	/** Number of threads blocking on current_event_cond. */
291 	int current_event_waiters;
292 #endif
293 	/** The event whose callback is executing right now */
294 	struct event_callback *current_event;
295 
296 #ifdef _WIN32
297 	/** IOCP support structure, if IOCP is enabled. */
298 	struct event_iocp_port *iocp;
299 #endif
300 
301 	/** Flags that this base was configured with */
302 	enum event_base_config_flag flags;
303 
	/* Dispatch limits; presumably copied from the matching fields of
	 * struct event_config below -- TODO(review): confirm in event.c. */
304 	struct timeval max_dispatch_time;
305 	int max_dispatch_callbacks;
306 	int limit_callbacks_after_prio;
307 
308 	/* Notifying the main thread to wake it up, break the loop, etc. */
309 	/** True if the base already has a pending notify, and we don't need
310 	 * to add any more. */
311 	int is_notify_pending;
312 	/** A socketpair used by some th_notify functions to wake up the main
313 	 * thread. */
314 	evutil_socket_t th_notify_fd[2];
315 	/** An event used by some th_notify functions to wake up the main
316 	 * thread. */
317 	struct event th_notify;
318 	/** A function used to wake up the main thread from another thread. */
319 	int (*th_notify_fn)(struct event_base *base);
320 
321 	/** Saved seed for weak random number generator. Some backends use
322 	 * this to produce fairness among sockets. Protected by th_base_lock. */
323 	struct evutil_weakrand_state weakrand_seed;
324 
325 	/** List of event_onces that have not yet fired. */
326 	LIST_HEAD(once_event_list, event_once) once_events;
327 
328 };
329 
330 struct event_config_entry {
331 	TAILQ_ENTRY(event_config_entry) next;	/* linked into event_config.entries */
332 
333 	const char *avoid_method;	/* name of a backend method to avoid */
334 };
335 
336 /** Internal structure: describes the configuration we want for an event_base
337  * that we're about to allocate. */
338 struct event_config {
339 	TAILQ_HEAD(event_configq, event_config_entry) entries;
340 
	/* NOTE(review): fields below appear to mirror the event_base dispatch
	 * limits and the event_config_set_* setters -- confirm in event.c. */
341 	int n_cpus_hint;
342 	struct timeval max_dispatch_interval;
343 	int max_dispatch_callbacks;
344 	int limit_callbacks_after_prio;
345 	enum event_method_feature require_features;
346 	enum event_base_config_flag flags;
347 };
348 
349 /* Internal use only: Macros that might be missing from <sys/queue.h> */
350 #if defined(EVENT__HAVE_SYS_QUEUE_H) && !defined(EVENT__HAVE_TAILQFOREACH)
351 #ifndef TAILQ_FIRST
352 #define	TAILQ_FIRST(head)		((head)->tqh_first)
353 #endif
354 #ifndef TAILQ_END
355 #define	TAILQ_END(head)			NULL
356 #endif
357 #ifndef TAILQ_NEXT
358 #define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
359 #endif
360 
361 #ifndef TAILQ_FOREACH
362 #define TAILQ_FOREACH(var, head, field)					\
363 	for ((var) = TAILQ_FIRST(head);					\
364 	     (var) != TAILQ_END(head);					\
365 	     (var) = TAILQ_NEXT(var, field))
366 #endif
367 
368 #ifndef TAILQ_INSERT_BEFORE
369 #define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
370 	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
371 	(elm)->field.tqe_next = (listelm);				\
372 	*(listelm)->field.tqe_prev = (elm);				\
373 	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
374 } while (0)
375 #endif
376 #endif /* EVENT__HAVE_SYS_QUEUE_H && !EVENT__HAVE_TAILQFOREACH */
377 
/* Evaluates to the number of events currently active in 'base'. */
378 #define N_ACTIVE_CALLBACKS(base)					\
379 	((base)->event_count_active)
380 
/* Helpers for the common signal-handling code (see evsignal-internal.h):
 * install or restore the process handler for 'evsignal'. */
381 int evsig_set_handler_(struct event_base *base, int evsignal,
382 			  void (*fn)(int));
383 int evsig_restore_handler_(struct event_base *base, int evsignal);
384 
/* NOTE(review): by convention the _nolock_ suffix appears to mean the
 * caller must already hold the base's lock (cf. the locking note on
 * event_base_foreach_event_nolock_ below) -- confirm in event.c. */
385 int event_add_nolock_(struct event *ev,
386     const struct timeval *tv, int tv_is_absolute);
387 int event_del_nolock_(struct event *ev);
388 int event_remove_timer_nolock_(struct event *ev);
389 
390 void event_active_nolock_(struct event *ev, int res, short count);
391 int event_callback_activate_(struct event_base *, struct event_callback *);
392 int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
393 int event_callback_cancel_(struct event_base *base,
394     struct event_callback *evcb);
395 
396 void event_active_later_(struct event *ev, int res);
397 void event_active_later_nolock_(struct event *ev, int res);
398 void event_callback_activate_later_nolock_(struct event_base *base,
399     struct event_callback *evcb);
400 int event_callback_cancel_nolock_(struct event_base *base,
401     struct event_callback *evcb);
402 void event_callback_init_(struct event_base *base,
403     struct event_callback *cb);
404 
405 /* Adjust 'base''s count of virtual events (see the virtual_event_count
 * field of struct event_base).  NOTE(review): exact semantics not visible
 * here -- confirm against the definitions in event.c. */
406 void event_base_add_virtual_(struct event_base *base);
407 void event_base_del_virtual_(struct event_base *base);
408 
409 /** For debugging: unless assertions are disabled, verify the referential
410     integrity of the internal data structures of 'base'.  This operation can
411     be expensive.
412 
413     Returns on success; aborts on failure.
414 */
415 void event_base_assert_ok_(struct event_base *base);
416 void event_base_assert_ok_nolock_(struct event_base *base);
417 
418 
419 /* Helper function: Call 'cb' exactly once for every inserted or active event
420  * in the event_base 'base', passing it 'arg' each time.
421  *
422  * If cb returns 0, continue on to the next event. Otherwise, return the same
423  * value that cb returned.
424  *
425  * Requires that 'base' be locked.
426  */
427 int event_base_foreach_event_nolock_(struct event_base *base,
428     event_base_foreach_event_cb cb, void *arg);
429 
430 #ifdef __cplusplus
431 }
432 #endif
433 
434 #endif /* EVENT_INTERNAL_H_INCLUDED_ */
435