1 /*	$NetBSD: event.c,v 1.1.1.1 2013/12/27 23:31:16 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
5  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #include "event2/event-config.h"
30 #include "evconfig-private.h"
31 
32 #ifdef _WIN32
33 #include <winsock2.h>
34 #define WIN32_LEAN_AND_MEAN
35 #include <windows.h>
36 #undef WIN32_LEAN_AND_MEAN
37 #endif
38 #include <sys/types.h>
39 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
40 #include <sys/time.h>
41 #endif
42 #include <sys/queue.h>
43 #ifdef EVENT__HAVE_SYS_SOCKET_H
44 #include <sys/socket.h>
45 #endif
46 #include <stdio.h>
47 #include <stdlib.h>
48 #ifdef EVENT__HAVE_UNISTD_H
49 #include <unistd.h>
50 #endif
51 #include <ctype.h>
52 #include <errno.h>
53 #include <signal.h>
54 #include <string.h>
55 #include <time.h>
56 #include <limits.h>
57 
58 #include "event2/event.h"
59 #include "event2/event_struct.h"
60 #include "event2/event_compat.h"
61 #include "event-internal.h"
62 #include "defer-internal.h"
63 #include "evthread-internal.h"
64 #include "event2/thread.h"
65 #include "event2/util.h"
66 #include "log-internal.h"
67 #include "evmap-internal.h"
68 #include "iocp-internal.h"
69 #include "changelist-internal.h"
70 #define HT_NO_CACHE_HASH_VALUES
71 #include "ht-internal.h"
72 #include "util-internal.h"
73 
74 
75 #ifdef EVENT__HAVE_WORKING_KQUEUE
76 #include "kqueue-internal.h"
77 #endif
78 
79 #ifdef EVENT__HAVE_EVENT_PORTS
80 extern const struct eventop evportops;
81 #endif
82 #ifdef EVENT__HAVE_SELECT
83 extern const struct eventop selectops;
84 #endif
85 #ifdef EVENT__HAVE_POLL
86 extern const struct eventop pollops;
87 #endif
88 #ifdef EVENT__HAVE_EPOLL
89 extern const struct eventop epollops;
90 #endif
91 #ifdef EVENT__HAVE_WORKING_KQUEUE
92 extern const struct eventop kqops;
93 #endif
94 #ifdef EVENT__HAVE_DEVPOLL
95 extern const struct eventop devpollops;
96 #endif
97 #ifdef _WIN32
98 extern const struct eventop win32ops;
99 #endif
100 
101 /* Array of backends in order of preference. */
102 static const struct eventop *eventops[] = {
103 #ifdef EVENT__HAVE_EVENT_PORTS
104 	&evportops,
105 #endif
106 #ifdef EVENT__HAVE_WORKING_KQUEUE
107 	&kqops,
108 #endif
109 #ifdef EVENT__HAVE_EPOLL
110 	&epollops,
111 #endif
112 #ifdef EVENT__HAVE_DEVPOLL
113 	&devpollops,
114 #endif
115 #ifdef EVENT__HAVE_POLL
116 	&pollops,
117 #endif
118 #ifdef EVENT__HAVE_SELECT
119 	&selectops,
120 #endif
121 #ifdef _WIN32
122 	&win32ops,
123 #endif
124 	NULL
125 };
126 
127 /* Global state; deprecated */
128 struct event_base *event_global_current_base_ = NULL;
129 #define current_base event_global_current_base_
130 
131 /* Global state */
132 
133 static void *event_self_cbarg_ptr_ = NULL;
134 
135 /* Prototypes */
136 static void	event_queue_insert_active(struct event_base *, struct event_callback *);
137 static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
138 static void	event_queue_insert_timeout(struct event_base *, struct event *);
139 static void	event_queue_insert_inserted(struct event_base *, struct event *);
140 static void	event_queue_remove_active(struct event_base *, struct event_callback *);
141 static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
142 static void	event_queue_remove_timeout(struct event_base *, struct event *);
143 static void	event_queue_remove_inserted(struct event_base *, struct event *);
144 static void event_queue_make_later_events_active(struct event_base *base);
145 
146 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
147 
148 
149 #ifdef USE_REINSERT_TIMEOUT
150 /* This code seems buggy; only turn it on if we find out what the trouble is. */
151 static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
152 #endif
153 
154 static int	event_haveevents(struct event_base *);
155 
156 static int	event_process_active(struct event_base *);
157 
158 static int	timeout_next(struct event_base *, struct timeval **);
159 static void	timeout_process(struct event_base *);
160 
161 static inline void	event_signal_closure(struct event_base *, struct event *ev);
162 static inline void	event_persist_closure(struct event_base *, struct event *ev);
163 
164 static int	evthread_notify_base(struct event_base *base);
165 
166 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
167     struct event *ev);
168 
169 #ifndef EVENT__DISABLE_DEBUG_MODE
170 /* These functions implement a hashtable recording which 'struct event *' structures
171  * have been set up or added.  We don't want to trust the content of the struct
172  * event itself, since we're trying to work through cases where an event gets
173  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
174  */
175 
176 struct event_debug_entry {
177 	HT_ENTRY(event_debug_entry) node;
178 	const struct event *ptr;
179 	unsigned added : 1;
180 };
181 
182 static inline unsigned
183 hash_debug_entry(const struct event_debug_entry *e)
184 {
185 	/* We need to do this silliness to convince compilers that we
186 	 * honestly mean to cast e->ptr to an integer, and discard any
187 	 * part of it that doesn't fit in an unsigned.
188 	 */
189 	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
190 	/* Our hashtable implementation is pretty sensitive to low bits,
191 	 * and every struct event is over 64 bytes in size, so we can
192 	 * just say >>6. */
193 	return (u >> 6);
194 }
195 
196 static inline int
197 eq_debug_entry(const struct event_debug_entry *a,
198     const struct event_debug_entry *b)
199 {
200 	return a->ptr == b->ptr;
201 }
202 
203 int event_debug_mode_on_ = 0;
204 /* Set if it's too late to enable event_debug_mode. */
205 static int event_debug_mode_too_late = 0;
206 #ifndef EVENT__DISABLE_THREAD_SUPPORT
207 static void *event_debug_map_lock_ = NULL;
208 #endif
209 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
210 	HT_INITIALIZER();
211 
212 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
213     eq_debug_entry)
214 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
215     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
216 
217 /* Macro: record that ev is now setup (that is, ready for an add) */
218 #define event_debug_note_setup_(ev) do {				\
219 	if (event_debug_mode_on_) {					\
220 		struct event_debug_entry *dent,find;			\
221 		find.ptr = (ev);					\
222 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
223 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
224 		if (dent) {						\
225 			dent->added = 0;				\
226 		} else {						\
227 			dent = mm_malloc(sizeof(*dent));		\
228 			if (!dent)					\
229 				event_err(1,				\
230 				    "Out of memory in debugging code");	\
231 			dent->ptr = (ev);				\
232 			dent->added = 0;				\
233 			HT_INSERT(event_debug_map, &global_debug_map, dent); \
234 		}							\
235 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
236 	}								\
237 	event_debug_mode_too_late = 1;					\
238 	} while (0)
239 /* Macro: record that ev is no longer setup */
240 #define event_debug_note_teardown_(ev) do {				\
241 	if (event_debug_mode_on_) {					\
242 		struct event_debug_entry *dent,find;			\
243 		find.ptr = (ev);					\
244 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
245 		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
246 		if (dent)						\
247 			mm_free(dent);					\
248 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
249 	}								\
250 	event_debug_mode_too_late = 1;					\
251 	} while (0)
252 /* Macro: record that ev is now added */
253 #define event_debug_note_add_(ev)	do {				\
254 	if (event_debug_mode_on_) {					\
255 		struct event_debug_entry *dent,find;			\
256 		find.ptr = (ev);					\
257 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
258 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
259 		if (dent) {						\
260 			dent->added = 1;				\
261 		} else {						\
262 			event_errx(EVENT_ERR_ABORT_,			\
263 			    "%s: noting an add on a non-setup event %p" \
264 			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
265 			    ", flags: 0x%x)",				\
266 			    __func__, (ev), (ev)->ev_events,		\
267 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
268 		}							\
269 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
270 	}								\
271 	event_debug_mode_too_late = 1;					\
272 	} while (0)
273 /* Macro: record that ev is no longer added */
274 #define event_debug_note_del_(ev) do {					\
275 	if (event_debug_mode_on_) {					\
276 		struct event_debug_entry *dent,find;			\
277 		find.ptr = (ev);					\
278 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
279 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
280 		if (dent) {						\
281 			dent->added = 0;				\
282 		} else {						\
283 			event_errx(EVENT_ERR_ABORT_,			\
284 			    "%s: noting a del on a non-setup event %p"	\
285 			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
286 			    ", flags: 0x%x)",				\
287 			    __func__, (ev), (ev)->ev_events,		\
288 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
289 		}							\
290 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
291 	}								\
292 	event_debug_mode_too_late = 1;					\
293 	} while (0)
294 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
295 #define event_debug_assert_is_setup_(ev) do {				\
296 	if (event_debug_mode_on_) {					\
297 		struct event_debug_entry *dent,find;			\
298 		find.ptr = (ev);					\
299 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
300 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
301 		if (!dent) {						\
302 			event_errx(EVENT_ERR_ABORT_,			\
303 			    "%s called on a non-initialized event %p"	\
304 			    " (events: 0x%x, fd: "EV_SOCK_FMT\
305 			    ", flags: 0x%x)",				\
306 			    __func__, (ev), (ev)->ev_events,		\
307 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
308 		}							\
309 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
310 	}								\
311 	} while (0)
312 /* Macro: assert that ev is not added (i.e., okay to tear down or set
313  * up again) */
314 #define event_debug_assert_not_added_(ev) do {				\
315 	if (event_debug_mode_on_) {					\
316 		struct event_debug_entry *dent,find;			\
317 		find.ptr = (ev);					\
318 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
319 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
320 		if (dent && dent->added) {				\
321 			event_errx(EVENT_ERR_ABORT_,			\
322 			    "%s called on an already added event %p"	\
323 			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
324 			    "flags: 0x%x)",				\
325 			    __func__, (ev), (ev)->ev_events,		\
326 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
327 		}							\
328 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
329 	}								\
330 	} while (0)
331 #else
332 #define event_debug_note_setup_(ev) \
333 	((void)0)
334 #define event_debug_note_teardown_(ev) \
335 	((void)0)
336 #define event_debug_note_add_(ev) \
337 	((void)0)
338 #define event_debug_note_del_(ev) \
339 	((void)0)
340 #define event_debug_assert_is_setup_(ev) \
341 	((void)0)
342 #define event_debug_assert_not_added_(ev) \
343 	((void)0)
344 #endif
345 
346 #define EVENT_BASE_ASSERT_LOCKED(base)		\
347 	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
348 
349 /* How often (in seconds) do we check for changes in wall clock time relative
350  * to monotonic time?  Set this to -1 for 'never.' */
351 #define CLOCK_SYNC_INTERVAL 5
352 
353 /** Set 'tp' to the current time according to 'base'.  We must hold the lock
354  * on 'base'.  If there is a cached time, return it.  Otherwise, use
355  * clock_gettime or gettimeofday as appropriate to find out the right time.
356  * Return 0 on success, -1 on failure.
357  */
358 static int
359 gettime(struct event_base *base, struct timeval *tp)
360 {
361 	EVENT_BASE_ASSERT_LOCKED(base);
362 
363 	if (base->tv_cache.tv_sec) {
364 		*tp = base->tv_cache;
365 		return (0);
366 	}
367 
368 	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
369 		return -1;
370 	}
371 
372 	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
373 	    < tp->tv_sec) {
374 		struct timeval tv;
375 		evutil_gettimeofday(&tv,NULL);
376 		evutil_timersub(&tv, tp, &base->tv_clock_diff);
377 		base->last_updated_clock_diff = tp->tv_sec;
378 	}
379 
380 	return 0;
381 }
382 
383 int
384 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
385 {
386 	int r;
387 	if (!base) {
388 		base = current_base;
389 		if (!current_base)
390 			return evutil_gettimeofday(tv, NULL);
391 	}
392 
393 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
394 	if (base->tv_cache.tv_sec == 0) {
395 		r = evutil_gettimeofday(tv, NULL);
396 	} else {
397 		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
398 		r = 0;
399 	}
400 	EVBASE_RELEASE_LOCK(base, th_base_lock);
401 	return r;
402 }
403 
404 /** Make 'base' have no current cached time. */
405 static inline void
406 clear_time_cache(struct event_base *base)
407 {
408 	base->tv_cache.tv_sec = 0;
409 }
410 
411 /** Replace the cached time in 'base' with the current time. */
412 static inline void
413 update_time_cache(struct event_base *base)
414 {
415 	base->tv_cache.tv_sec = 0;
416 	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
417 	    gettime(base, &base->tv_cache);
418 }
419 
420 int
421 event_base_update_cache_time(struct event_base *base)
422 {
423 
424 	if (!base) {
425 		base = current_base;
426 		if (!current_base)
427 			return -1;
428 	}
429 
430 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
431 	update_time_cache(base);
432 	EVBASE_RELEASE_LOCK(base, th_base_lock);
433 	return 0;
434 }
435 
436 static inline struct event *
437 event_callback_to_event(struct event_callback *evcb)
438 {
439 	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
440 	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
441 }
442 
443 static inline struct event_callback *
444 event_to_event_callback(struct event *ev)
445 {
446 	return &ev->ev_evcallback;
447 }
448 
449 struct event_base *
450 event_init(void)
451 {
452 	struct event_base *base = event_base_new_with_config(NULL);
453 
454 	if (base == NULL) {
455 		event_errx(1, "%s: Unable to construct event_base", __func__);
456 		return NULL;
457 	}
458 
459 	current_base = base;
460 
461 	return (base);
462 }
463 
464 struct event_base *
465 event_base_new(void)
466 {
467 	struct event_base *base = NULL;
468 	struct event_config *cfg = event_config_new();
469 	if (cfg) {
470 		base = event_base_new_with_config(cfg);
471 		event_config_free(cfg);
472 	}
473 	return base;
474 }
475 
476 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
477  * avoid. */
478 static int
479 event_config_is_avoided_method(const struct event_config *cfg,
480     const char *method)
481 {
482 	struct event_config_entry *entry;
483 
484 	TAILQ_FOREACH(entry, &cfg->entries, next) {
485 		if (entry->avoid_method != NULL &&
486 		    strcmp(entry->avoid_method, method) == 0)
487 			return (1);
488 	}
489 
490 	return (0);
491 }
492 
493 /** Return true iff 'method' is disabled according to the environment. */
494 static int
495 event_is_method_disabled(const char *name)
496 {
497 	char environment[64];
498 	int i;
499 
500 	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
501 	for (i = 8; environment[i] != '\0'; ++i)
502 		environment[i] = EVUTIL_TOUPPER_(environment[i]);
503 	/* Note that evutil_getenv_() ignores the environment entirely if
504 	 * we're setuid */
505 	return (evutil_getenv_(environment) != NULL);
506 }
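
/* For example: exporting EVENT_NOEPOLL in the environment (any value) makes
 * event_is_method_disabled("epoll") return true, so event_base_new_with_config()
 * below skips the epoll backend unless EVENT_BASE_FLAG_IGNORE_ENV was set. */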
507 
508 int
509 event_base_get_features(const struct event_base *base)
510 {
511 	return base->evsel->features;
512 }
513 
514 void
515 event_enable_debug_mode(void)
516 {
517 #ifndef EVENT__DISABLE_DEBUG_MODE
518 	if (event_debug_mode_on_)
519 		event_errx(1, "%s was called twice!", __func__);
520 	if (event_debug_mode_too_late)
521 		event_errx(1, "%s must be called *before* creating any events "
522 		    "or event_bases",__func__);
523 
524 	event_debug_mode_on_ = 1;
525 
526 	HT_INIT(event_debug_map, &global_debug_map);
527 #endif
528 }
529 
530 #if 0
531 void
532 event_disable_debug_mode(void)
533 {
534 	struct event_debug_entry **ent, *victim;
535 
536 	EVLOCK_LOCK(event_debug_map_lock_, 0);
537 	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
538 		victim = *ent;
539 		ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
540 		mm_free(victim);
541 	}
542 	HT_CLEAR(event_debug_map, &global_debug_map);
543 	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
544 }
545 #endif
546 
547 struct event_base *
548 event_base_new_with_config(const struct event_config *cfg)
549 {
550 	int i;
551 	struct event_base *base;
552 	int should_check_environment;
553 
554 #ifndef EVENT__DISABLE_DEBUG_MODE
555 	event_debug_mode_too_late = 1;
556 #endif
557 
558 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
559 		event_warn("%s: calloc", __func__);
560 		return NULL;
561 	}
562 
563 	if (cfg)
564 		base->flags = cfg->flags;
565 
566 	should_check_environment =
567 	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
568 
569 	{
570 		struct timeval tmp;
571 		int precise_time =
572 		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
573 		int flags;
574 		if (should_check_environment && !precise_time) {
575 			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
576 			if (precise_time)
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
577 		}
578 		flags = precise_time ? EV_MONOT_PRECISE : 0;
579 		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
580 
581 		gettime(base, &tmp);
582 	}
583 
584 	min_heap_ctor_(&base->timeheap);
585 
586 	base->sig.ev_signal_pair[0] = -1;
587 	base->sig.ev_signal_pair[1] = -1;
588 	base->th_notify_fd[0] = -1;
589 	base->th_notify_fd[1] = -1;
590 
591 	TAILQ_INIT(&base->active_later_queue);
592 
593 	evmap_io_initmap_(&base->io);
594 	evmap_signal_initmap_(&base->sigmap);
595 	event_changelist_init_(&base->changelist);
596 
597 	base->evbase = NULL;
598 
599 	if (cfg) {
600 		memcpy(&base->max_dispatch_time,
601 		    &cfg->max_dispatch_interval, sizeof(struct timeval));
602 		base->limit_callbacks_after_prio =
603 		    cfg->limit_callbacks_after_prio;
604 	} else {
605 		base->max_dispatch_time.tv_sec = -1;
606 		base->limit_callbacks_after_prio = 1;
607 	}
608 	if (cfg && cfg->max_dispatch_callbacks >= 0) {
609 		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
610 	} else {
611 		base->max_dispatch_callbacks = INT_MAX;
612 	}
613 	if (base->max_dispatch_callbacks == INT_MAX &&
614 	    base->max_dispatch_time.tv_sec == -1)
615 		base->limit_callbacks_after_prio = INT_MAX;
616 
617 	for (i = 0; eventops[i] && !base->evbase; i++) {
618 		if (cfg != NULL) {
619 			/* determine if this backend should be avoided */
620 			if (event_config_is_avoided_method(cfg,
621 				eventops[i]->name))
622 				continue;
623 			if ((eventops[i]->features & cfg->require_features)
624 			    != cfg->require_features)
625 				continue;
626 		}
627 
628 		/* also obey the environment variables */
629 		if (should_check_environment &&
630 		    event_is_method_disabled(eventops[i]->name))
631 			continue;
632 
633 		base->evsel = eventops[i];
634 
635 		base->evbase = base->evsel->init(base);
636 	}
637 
638 	if (base->evbase == NULL) {
639 		event_warnx("%s: no event mechanism available",
640 		    __func__);
641 		base->evsel = NULL;
642 		event_base_free(base);
643 		return NULL;
644 	}
645 
646 	if (evutil_getenv_("EVENT_SHOW_METHOD"))
647 		event_msgx("libevent using: %s", base->evsel->name);
648 
649 	/* allocate a single active event queue */
650 	if (event_base_priority_init(base, 1) < 0) {
651 		event_base_free(base);
652 		return NULL;
653 	}
654 
655 	/* prepare for threading */
656 
657 #ifndef EVENT__DISABLE_THREAD_SUPPORT
658 	if (EVTHREAD_LOCKING_ENABLED() &&
659 	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
660 		int r;
661 		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
662 		EVTHREAD_ALLOC_COND(base->current_event_cond);
663 		r = evthread_make_base_notifiable(base);
664 		if (r<0) {
665 			event_warnx("%s: Unable to make base notifiable.", __func__);
666 			event_base_free(base);
667 			return NULL;
668 		}
669 	}
670 #endif
671 
672 #ifdef _WIN32
673 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
674 		event_base_start_iocp_(base, cfg->n_cpus_hint);
675 #endif
676 
677 	return (base);
678 }
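
/* Illustrative usage sketch: constructing a base from an explicit
 * configuration, e.g. one that avoids select and requires O(1) event
 * operations:
 *
 *	struct event_config *cfg = event_config_new();
 *	struct event_base *base = NULL;
 *	if (cfg) {
 *		event_config_avoid_method(cfg, "select");
 *		event_config_require_features(cfg, EV_FEATURE_O1);
 *		base = event_base_new_with_config(cfg);
 *		event_config_free(cfg);
 *	}
 *	if (!base)
 *		base = event_base_new(); // fall back to the default selection
 */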
679 
680 int
681 event_base_start_iocp_(struct event_base *base, int n_cpus)
682 {
683 #ifdef _WIN32
684 	if (base->iocp)
685 		return 0;
686 	base->iocp = event_iocp_port_launch_(n_cpus);
687 	if (!base->iocp) {
688 		event_warnx("%s: Couldn't launch IOCP", __func__);
689 		return -1;
690 	}
691 	return 0;
692 #else
693 	return -1;
694 #endif
695 }
696 
697 void
698 event_base_stop_iocp_(struct event_base *base)
699 {
700 #ifdef _WIN32
701 	int rv;
702 
703 	if (!base->iocp)
704 		return;
705 	rv = event_iocp_shutdown_(base->iocp, -1);
706 	EVUTIL_ASSERT(rv >= 0);
707 	base->iocp = NULL;
708 #endif
709 }
710 
711 void
712 event_base_free(struct event_base *base)
713 {
714 	int i, n_deleted=0;
715 	struct event *ev;
716 	/* XXXX grab the lock? If there is contention when one thread frees
717 	 * the base, then the contending thread will be very sad soon. */
718 
719 	/* event_base_free(NULL) is how to free the current_base if we
720 	 * made it with event_init and forgot to hold a reference to it. */
721 	if (base == NULL && current_base)
722 		base = current_base;
723 	/* If we're freeing current_base, there won't be a current_base. */
724 	if (base == current_base)
725 		current_base = NULL;
726 	/* Don't actually free NULL. */
727 	if (base == NULL) {
728 		event_warnx("%s: no base to free", __func__);
729 		return;
730 	}
731 	/* XXX(niels) - check for internal events first */
732 
733 #ifdef _WIN32
734 	event_base_stop_iocp_(base);
735 #endif
736 
737 	/* threading fds if we have them */
738 	if (base->th_notify_fd[0] != -1) {
739 		event_del(&base->th_notify);
740 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
741 		if (base->th_notify_fd[1] != -1)
742 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
743 		base->th_notify_fd[0] = -1;
744 		base->th_notify_fd[1] = -1;
745 		event_debug_unassign(&base->th_notify);
746 	}
747 
748 	/* Delete all non-internal events. */
749 	evmap_delete_all_(base);
750 
751 	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
752 		event_del(ev);
753 		++n_deleted;
754 	}
755 	for (i = 0; i < base->n_common_timeouts; ++i) {
756 		struct common_timeout_list *ctl =
757 		    base->common_timeout_queues[i];
758 		event_del(&ctl->timeout_event); /* Internal; doesn't count */
759 		event_debug_unassign(&ctl->timeout_event);
760 		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
761 			struct event *next = TAILQ_NEXT(ev,
762 			    ev_timeout_pos.ev_next_with_common_timeout);
763 			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
764 				event_del(ev);
765 				++n_deleted;
766 			}
767 			ev = next;
768 		}
769 		mm_free(ctl);
770 	}
771 	if (base->common_timeout_queues)
772 		mm_free(base->common_timeout_queues);
773 
774 	for (i = 0; i < base->nactivequeues; ++i) {
775 		struct event_callback *evcb, *next;
776 		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
777 			next = TAILQ_NEXT(evcb, evcb_active_next);
778 			if (evcb->evcb_flags & EVLIST_INIT) {
779 				ev = event_callback_to_event(evcb);
780 				if (!(ev->ev_flags & EVLIST_INTERNAL)) {
781 					event_del(ev);
782 					++n_deleted;
783 				}
784 			} else {
785 				event_callback_cancel_(base, evcb);
786 				++n_deleted;
787 			}
788 			evcb = next;
789 		}
790 	}
791 	{
792 		struct event_callback *evcb;
793 		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
794 			if (evcb->evcb_flags & EVLIST_INIT) {
795 				ev = event_callback_to_event(evcb);
796 				event_del(ev);
797 				++n_deleted;
798 			} else {
799 				event_callback_cancel_(base, evcb);
800 				++n_deleted;
801 			}
802 		}
803 	}
804 
805 
806 	if (n_deleted)
807 		event_debug(("%s: %d events were still set in base",
808 			__func__, n_deleted));
809 
810 	while (LIST_FIRST(&base->once_events)) {
811 		struct event_once *eonce = LIST_FIRST(&base->once_events);
812 		LIST_REMOVE(eonce, next_once);
813 		mm_free(eonce);
814 	}
815 
816 	if (base->evsel != NULL && base->evsel->dealloc != NULL)
817 		base->evsel->dealloc(base);
818 
819 	for (i = 0; i < base->nactivequeues; ++i)
820 		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
821 
822 	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
823 	min_heap_dtor_(&base->timeheap);
824 
825 	mm_free(base->activequeues);
826 
827 	evmap_io_clear_(&base->io);
828 	evmap_signal_clear_(&base->sigmap);
829 	event_changelist_freemem_(&base->changelist);
830 
831 	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
832 	EVTHREAD_FREE_COND(base->current_event_cond);
833 
834 	mm_free(base);
835 }
836 
837 /* Fake eventop; used to disable the backend temporarily inside event_reinit
838  * so that we can call event_del() on an event without telling the backend.
839  */
840 static int
841 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
842     short events, void *fdinfo)
843 {
844 	return 0;
845 }
846 const struct eventop nil_eventop = {
847 	"nil",
848 	NULL, /* init: unused. */
849 	NULL, /* add: unused. */
850 	nil_backend_del, /* del: used, so needs to be killed. */
851 	NULL, /* dispatch: unused. */
852 	NULL, /* dealloc: unused. */
853 	0, 0, 0
854 };
855 
856 /* reinitialize the event base after a fork */
857 int
858 event_reinit(struct event_base *base)
859 {
860 	const struct eventop *evsel;
861 	int res = 0;
862 	int was_notifiable = 0;
863 	int had_signal_added = 0;
864 
865 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
866 
867 	evsel = base->evsel;
868 
869 	/* check if this event mechanism requires reinit on the backend */
870 	if (evsel->need_reinit) {
871 		/* We're going to call event_del() on our notify events (the
872 		 * ones that tell about signals and wakeup events).  But we
873 		 * don't actually want to tell the backend to change its
874 		 * state, since it might still share some resource (a kqueue,
875 		 * an epoll fd) with the parent process, and we don't want to
876 	 * delete the fds from _that_ backend, so we temporarily stub out
877 		 * the evsel with a replacement.
878 		 */
879 		base->evsel = &nil_eventop;
880 	}
881 
882 	/* We need to re-create the signal-notification fd and the
883 	 * thread-notification fd.  Otherwise, we'll still share those with
884 	 * the parent process, which would make any notification sent to them
885 	 * get received by one or both of the event loops, more or less at
886 	 * random.
887 	 */
888 	if (base->sig.ev_signal_added) {
889 		event_del_nolock_(&base->sig.ev_signal);
890 		event_debug_unassign(&base->sig.ev_signal);
891 		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
892 		if (base->sig.ev_signal_pair[0] != -1)
893 			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
894 		if (base->sig.ev_signal_pair[1] != -1)
895 			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
896 		had_signal_added = 1;
897 		base->sig.ev_signal_added = 0;
898 	}
899 	if (base->th_notify_fn != NULL) {
900 		was_notifiable = 1;
901 		base->th_notify_fn = NULL;
902 	}
903 	if (base->th_notify_fd[0] != -1) {
904 		event_del_nolock_(&base->th_notify);
905 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
906 		if (base->th_notify_fd[1] != -1)
907 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
908 		base->th_notify_fd[0] = -1;
909 		base->th_notify_fd[1] = -1;
910 		event_debug_unassign(&base->th_notify);
911 	}
912 
913 	/* Replace the original evsel. */
914 	base->evsel = evsel;
915 
916 	if (evsel->need_reinit) {
917 		/* Reconstruct the backend through brute-force, so that we do
918 		 * not share any structures with the parent process. For some
919 		 * backends, this is necessary: epoll and kqueue, for
920 		 * instance, have events associated with a kernel
921 		 * structure. If we didn't reinitialize, we'd share that
922 		 * structure with the parent process, and any changes made by
923 		 * the parent would affect our backend's behavior (and vice
924 		 * versa).
925 		 */
926 		if (base->evsel->dealloc != NULL)
927 			base->evsel->dealloc(base);
928 		base->evbase = evsel->init(base);
929 		if (base->evbase == NULL) {
930 			event_errx(1,
931 			   "%s: could not reinitialize event mechanism",
932 			   __func__);
933 			res = -1;
934 			goto done;
935 		}
936 
937 		/* Empty out the changelist (if any): we are starting from a
938 		 * blank slate. */
939 		event_changelist_freemem_(&base->changelist);
940 
941 		/* Tell the event maps to re-inform the backend about all
942 		 * pending events. This will make the signal notification
943 		 * event get re-created if necessary. */
944 		if (evmap_reinit_(base) < 0)
945 			res = -1;
946 	} else {
947 		if (had_signal_added)
948 			res = evsig_init_(base);
949 	}
950 
951 	/* If we were notifiable before, and nothing just exploded, become
952 	 * notifiable again. */
953 	if (was_notifiable && res == 0)
954 		res = evthread_make_base_notifiable_nolock_(base);
955 
956 done:
957 	EVBASE_RELEASE_LOCK(base, th_base_lock);
958 	return (res);
959 }
960 
961 const char **
962 event_get_supported_methods(void)
963 {
964 	static const char **methods = NULL;
965 	const struct eventop **method;
966 	const char **tmp;
967 	int i = 0, k;
968 
969 	/* count all methods */
970 	for (method = &eventops[0]; *method != NULL; ++method) {
971 		++i;
972 	}
973 
974 	/* allocate one more than we need for the NULL pointer */
975 	tmp = mm_calloc((i + 1), sizeof(char *));
976 	if (tmp == NULL)
977 		return (NULL);
978 
979 	/* populate the array with the supported methods */
980 	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
981 		tmp[i++] = eventops[k]->name;
982 	}
983 	tmp[i] = NULL;
984 
985 	if (methods != NULL)
986 		mm_free((char**)methods);
987 
988 	methods = tmp;
989 
990 	return (methods);
991 }
992 
993 struct event_config *
994 event_config_new(void)
995 {
996 	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
997 
998 	if (cfg == NULL)
999 		return (NULL);
1000 
1001 	TAILQ_INIT(&cfg->entries);
1002 	cfg->max_dispatch_interval.tv_sec = -1;
1003 	cfg->max_dispatch_callbacks = INT_MAX;
1004 	cfg->limit_callbacks_after_prio = 1;
1005 
1006 	return (cfg);
1007 }
1008 
1009 static void
1010 event_config_entry_free(struct event_config_entry *entry)
1011 {
1012 	if (entry->avoid_method != NULL)
1013 		mm_free((char *)entry->avoid_method);
1014 	mm_free(entry);
1015 }
1016 
1017 void
1018 event_config_free(struct event_config *cfg)
1019 {
1020 	struct event_config_entry *entry;
1021 
1022 	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1023 		TAILQ_REMOVE(&cfg->entries, entry, next);
1024 		event_config_entry_free(entry);
1025 	}
1026 	mm_free(cfg);
1027 }
1028 
1029 int
1030 event_config_set_flag(struct event_config *cfg, int flag)
1031 {
1032 	if (!cfg)
1033 		return -1;
1034 	cfg->flags |= flag;
1035 	return 0;
1036 }
1037 
1038 int
1039 event_config_avoid_method(struct event_config *cfg, const char *method)
1040 {
1041 	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1042 	if (entry == NULL)
1043 		return (-1);
1044 
1045 	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1046 		mm_free(entry);
1047 		return (-1);
1048 	}
1049 
1050 	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1051 
1052 	return (0);
1053 }
1054 
1055 int
1056 event_config_require_features(struct event_config *cfg,
1057     int features)
1058 {
1059 	if (!cfg)
1060 		return (-1);
1061 	cfg->require_features = features;
1062 	return (0);
1063 }
1064 
1065 int
1066 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1067 {
1068 	if (!cfg)
1069 		return (-1);
1070 	cfg->n_cpus_hint = cpus;
1071 	return (0);
1072 }
1073 
1074 int
1075 event_config_set_max_dispatch_interval(struct event_config *cfg,
1076     const struct timeval *max_interval, int max_callbacks, int min_priority)
1077 {
1078 	if (max_interval)
1079 		memcpy(&cfg->max_dispatch_interval, max_interval,
1080 		    sizeof(struct timeval));
1081 	else
1082 		cfg->max_dispatch_interval.tv_sec = -1;
1083 	cfg->max_dispatch_callbacks =
1084 	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
1085 	if (min_priority < 0)
1086 		min_priority = 0;
1087 	cfg->limit_callbacks_after_prio = min_priority;
1088 	return (0);
1089 }
1090 
1091 int
1092 event_priority_init(int npriorities)
1093 {
1094 	return event_base_priority_init(current_base, npriorities);
1095 }
1096 
1097 int
1098 event_base_priority_init(struct event_base *base, int npriorities)
1099 {
1100 	int i, r;
1101 	r = -1;
1102 
1103 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1104 
1105 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1106 	    || npriorities >= EVENT_MAX_PRIORITIES)
1107 		goto err;
1108 
1109 	if (npriorities == base->nactivequeues)
1110 		goto ok;
1111 
1112 	if (base->nactivequeues) {
1113 		mm_free(base->activequeues);
1114 		base->nactivequeues = 0;
1115 	}
1116 
1117 	/* Allocate our priority queues */
1118 	base->activequeues = (struct evcallback_list *)
1119 	  mm_calloc(npriorities, sizeof(struct evcallback_list));
1120 	if (base->activequeues == NULL) {
1121 		event_warn("%s: calloc", __func__);
1122 		goto err;
1123 	}
1124 	base->nactivequeues = npriorities;
1125 
1126 	for (i = 0; i < base->nactivequeues; ++i) {
1127 		TAILQ_INIT(&base->activequeues[i]);
1128 	}
1129 
1130 ok:
1131 	r = 0;
1132 err:
1133 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1134 	return (r);
1135 }
1136 
1137 int
1138 event_base_get_npriorities(struct event_base *base)
1139 {
1140 
1141 	int n;
1142 	if (base == NULL)
1143 		base = current_base;
1144 
1145 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1146 	n = base->nactivequeues;
1147 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1148 	return (n);
1149 }
1150 
1151 /* Returns true iff we're currently watching any events. */
1152 static int
1153 event_haveevents(struct event_base *base)
1154 {
1155 	/* Caller must hold th_base_lock */
1156 	return (base->virtual_event_count > 0 || base->event_count > 0);
1157 }
1158 
1159 /* "closure" function called when processing active signal events */
1160 static inline void
1161 event_signal_closure(struct event_base *base, struct event *ev)
1162 {
1163 	short ncalls;
1164 	int should_break;
1165 
1166 	/* Allows deletes to work */
1167 	ncalls = ev->ev_ncalls;
1168 	if (ncalls != 0)
1169 		ev->ev_pncalls = &ncalls;
1170 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1171 	while (ncalls) {
1172 		ncalls--;
1173 		ev->ev_ncalls = ncalls;
1174 		if (ncalls == 0)
1175 			ev->ev_pncalls = NULL;
1176 		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1177 
1178 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1179 		should_break = base->event_break;
1180 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1181 
1182 		if (should_break) {
1183 			if (ncalls != 0)
1184 				ev->ev_pncalls = NULL;
1185 			return;
1186 		}
1187 	}
1188 }
1189 
1190 /* Common timeouts are special timeouts that are handled as queues rather than
1191  * in the minheap.  This is more efficient than the minheap if we happen to
1192  * know that we're going to get several thousands of timeout events all with
1193  * the same timeout value.
1194  *
1195  * Since all our timeout handling code assumes timevals can be copied,
1196  * assigned, etc, we can't use "magic pointer" to encode these common
1197  * timeouts.  Searching through a list to see if every timeout is common could
1198  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1199  * is 32 bits long, but only uses 20 of those bits (since it can never be over
1200  * 999999.)  We use the top bits to encode 4 bites of magic number, and 8 bits
1201  * of index into the event_base's aray of common timeouts.
1202  */
1203 
1204 #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1205 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1206 #define COMMON_TIMEOUT_IDX_SHIFT 20
1207 #define COMMON_TIMEOUT_MASK     0xf0000000
1208 #define COMMON_TIMEOUT_MAGIC    0x50000000
1209 
1210 #define COMMON_TIMEOUT_IDX(tv) \
1211 	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
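
/* Illustrative example of this encoding: a common timeout at index 3 with a
 * duration of 2.5 seconds is stored as
 *	tv_sec  == 2
 *	tv_usec == COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT) | 500000
 * so that COMMON_TIMEOUT_IDX() recovers the index 3 and
 * (tv_usec & MICROSECONDS_MASK) recovers the real 500000 microseconds. */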
1212 
1213 /** Return true iff 'tv' is a common timeout in 'base' */
1214 static inline int
1215 is_common_timeout(const struct timeval *tv,
1216     const struct event_base *base)
1217 {
1218 	int idx;
1219 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1220 		return 0;
1221 	idx = COMMON_TIMEOUT_IDX(tv);
1222 	return idx < base->n_common_timeouts;
1223 }
1224 
1225 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1226  * one is a common timeout. */
1227 static inline int
1228 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1229 {
1230 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1231 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
1232 }
1233 
1234 /** Requires that 'tv' is a common timeout.  Return the corresponding
1235  * common_timeout_list. */
1236 static inline struct common_timeout_list *
1237 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1238 {
1239 	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1240 }
1241 
1242 #if 0
1243 static inline int
1244 common_timeout_ok(const struct timeval *tv,
1245     struct event_base *base)
1246 {
1247 	const struct timeval *expect =
1248 	    &get_common_timeout_list(base, tv)->duration;
1249 	return tv->tv_sec == expect->tv_sec &&
1250 	    tv->tv_usec == expect->tv_usec;
1251 }
1252 #endif
1253 
1254 /* Add the timeout for the first event in the given common timeout list to the
1255  * event_base's minheap. */
1256 static void
1257 common_timeout_schedule(struct common_timeout_list *ctl,
1258     const struct timeval *now, struct event *head)
1259 {
1260 	struct timeval timeout = head->ev_timeout;
1261 	timeout.tv_usec &= MICROSECONDS_MASK;
1262 	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1263 }
1264 
1265 /* Callback: invoked when the timeout for a common timeout queue triggers.
1266  * This means that (at least) the first event in that queue should be run,
1267  * and the timeout should be rescheduled if there are more events. */
1268 static void
1269 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1270 {
1271 	struct timeval now;
1272 	struct common_timeout_list *ctl = arg;
1273 	struct event_base *base = ctl->base;
1274 	struct event *ev = NULL;
1275 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1276 	gettime(base, &now);
1277 	while (1) {
1278 		ev = TAILQ_FIRST(&ctl->events);
1279 		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1280 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
1281 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1282 			break;
1283 		event_del_nolock_(ev);
1284 		event_active_nolock_(ev, EV_TIMEOUT, 1);
1285 	}
1286 	if (ev)
1287 		common_timeout_schedule(ctl, &now, ev);
1288 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1289 }
1290 
1291 #define MAX_COMMON_TIMEOUTS 256
1292 
1293 const struct timeval *
1294 event_base_init_common_timeout(struct event_base *base,
1295     const struct timeval *duration)
1296 {
1297 	int i;
1298 	struct timeval tv;
1299 	const struct timeval *result=NULL;
1300 	struct common_timeout_list *new_ctl;
1301 
1302 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1303 	if (duration->tv_usec > 1000000) {
1304 		memcpy(&tv, duration, sizeof(struct timeval));
1305 		if (is_common_timeout(duration, base))
1306 			tv.tv_usec &= MICROSECONDS_MASK;
1307 		tv.tv_sec += tv.tv_usec / 1000000;
1308 		tv.tv_usec %= 1000000;
1309 		duration = &tv;
1310 	}
1311 	for (i = 0; i < base->n_common_timeouts; ++i) {
1312 		const struct common_timeout_list *ctl =
1313 		    base->common_timeout_queues[i];
1314 		if (duration->tv_sec == ctl->duration.tv_sec &&
1315 		    duration->tv_usec ==
1316 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1317 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1318 			result = &ctl->duration;
1319 			goto done;
1320 		}
1321 	}
1322 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1323 		event_warnx("%s: Too many common timeouts already in use; "
1324 		    "we only support %d per event_base", __func__,
1325 		    MAX_COMMON_TIMEOUTS);
1326 		goto done;
1327 	}
1328 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1329 		int n = base->n_common_timeouts < 16 ? 16 :
1330 		    base->n_common_timeouts*2;
1331 		struct common_timeout_list **newqueues =
1332 		    mm_realloc(base->common_timeout_queues,
1333 			n*sizeof(struct common_timeout_list *));
1334 		if (!newqueues) {
1335 			event_warn("%s: realloc",__func__);
1336 			goto done;
1337 		}
1338 		base->n_common_timeouts_allocated = n;
1339 		base->common_timeout_queues = newqueues;
1340 	}
1341 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1342 	if (!new_ctl) {
1343 		event_warn("%s: calloc",__func__);
1344 		goto done;
1345 	}
1346 	TAILQ_INIT(&new_ctl->events);
1347 	new_ctl->duration.tv_sec = duration->tv_sec;
1348 	new_ctl->duration.tv_usec =
1349 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1350 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1351 	evtimer_assign(&new_ctl->timeout_event, base,
1352 	    common_timeout_callback, new_ctl);
1353 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1354 	event_priority_set(&new_ctl->timeout_event, 0);
1355 	new_ctl->base = base;
1356 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1357 	result = &new_ctl->duration;
1358 
1359 done:
1360 	if (result)
1361 		EVUTIL_ASSERT(is_common_timeout(result, base));
1362 
1363 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1364 	return result;
1365 }
1366 
1367 /* Closure function invoked when we're activating a persistent event. */
1368 static inline void
1369 event_persist_closure(struct event_base *base, struct event *ev)
1370 {
1371 	/* reschedule the persistent event if we have a timeout. */
1372 	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1373 		/* If there was a timeout, we want it to run at an interval of
1374 		 * ev_io_timeout after the last time it was _scheduled_ for,
1375 		 * not ev_io_timeout after _now_.  If it fired for another
1376 		 * reason, though, the timeout ought to start ticking _now_. */
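		/* Illustrative example: a 5-second persistent timeout that was
		 * scheduled to expire at t=10 and fires as EV_TIMEOUT is
		 * rescheduled for t=15 (10+5), even if the callback runs a
		 * little late; if instead it fired because of I/O at t=8, the
		 * next timeout is t=13 (8+5). */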
1377 		struct timeval run_at, relative_to, delay, now;
1378 		ev_uint32_t usec_mask = 0;
1379 		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1380 			&ev->ev_io_timeout));
1381 		gettime(base, &now);
1382 		if (is_common_timeout(&ev->ev_timeout, base)) {
1383 			delay = ev->ev_io_timeout;
1384 			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1385 			delay.tv_usec &= MICROSECONDS_MASK;
1386 			if (ev->ev_res & EV_TIMEOUT) {
1387 				relative_to = ev->ev_timeout;
1388 				relative_to.tv_usec &= MICROSECONDS_MASK;
1389 			} else {
1390 				relative_to = now;
1391 			}
1392 		} else {
1393 			delay = ev->ev_io_timeout;
1394 			if (ev->ev_res & EV_TIMEOUT) {
1395 				relative_to = ev->ev_timeout;
1396 			} else {
1397 				relative_to = now;
1398 			}
1399 		}
1400 		evutil_timeradd(&relative_to, &delay, &run_at);
1401 		if (evutil_timercmp(&run_at, &now, <)) {
1402 			/* Looks like we missed at least one invocation due to
1403 			 * a clock jump, not running the event loop for a
1404 			 * while, really slow callbacks, or
1405 			 * something. Reschedule relative to now.
1406 			 */
1407 			evutil_timeradd(&now, &delay, &run_at);
1408 		}
1409 		run_at.tv_usec |= usec_mask;
1410 		event_add_nolock_(ev, &run_at, 1);
1411 	}
1412 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1413 	(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1414 }
1415 
1416 /*
1417   Helper for event_process_active to process all the events in a single queue,
1418   releasing the lock as we go.  This function requires that the lock be held
1419   when it's invoked.  Returns -1 if we get a signal or an event_break that
1420   means we should stop processing any active events now.  Otherwise returns
1421   the number of non-internal event_callbacks that we processed.
1422 */
1423 static int
1424 event_process_active_single_queue(struct event_base *base,
1425     struct evcallback_list *activeq,
1426     int max_to_process, const struct timeval *endtime)
1427 {
1428 	struct event_callback *evcb;
1429 	int count = 0;
1430 
1431 	EVUTIL_ASSERT(activeq != NULL);
1432 
1433 	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1434 		struct event *ev=NULL;
1435 		if (evcb->evcb_flags & EVLIST_INIT) {
1436 			ev = event_callback_to_event(evcb);
1437 
1438 			if (ev->ev_events & EV_PERSIST)
1439 				event_queue_remove_active(base, evcb);
1440 			else
1441 				event_del_nolock_(ev);
1442 			event_debug((
1443 			    "event_process_active: event: %p, %s%scall %p",
1444 			    ev,
1445 			    ev->ev_res & EV_READ ? "EV_READ " : " ",
1446 			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1447 			    ev->ev_callback));
1448 		} else {
1449 			event_queue_remove_active(base, evcb);
1450 			event_debug(("event_process_active: event_callback %p, "
1451 				"closure %d, call %p",
1452 				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1453 		}
1454 
1455 		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1456 			++count;
1457 
1458 
1459 		base->current_event = evcb;
1460 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1461 		base->current_event_waiters = 0;
1462 #endif
1463 
1464 		switch (evcb->evcb_closure) {
1465 		case EV_CLOSURE_EVENT_SIGNAL:
1466 			event_signal_closure(base, ev);
1467 			break;
1468 		case EV_CLOSURE_EVENT_PERSIST:
1469 			event_persist_closure(base, ev);
1470 			break;
1471 		case EV_CLOSURE_EVENT:
1472 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1473 			(*ev->ev_callback)(
1474 				ev->ev_fd, ev->ev_res, ev->ev_arg);
1475 			break;
1476 		case EV_CLOSURE_CB_SELF:
1477 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1478 			evcb->evcb_cb_union.evcb_selfcb(evcb, evcb->evcb_arg);
1479 			break;
1480 		default:
1481 			EVUTIL_ASSERT(0);
1482 		}
1483 
1484 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1485 		base->current_event = NULL;
1486 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1487 		if (base->current_event_waiters) {
1488 			base->current_event_waiters = 0;
1489 			EVTHREAD_COND_BROADCAST(base->current_event_cond);
1490 		}
1491 #endif
1492 
1493 		if (base->event_break)
1494 			return -1;
1495 		if (count >= max_to_process)
1496 			return count;
1497 		if (count && endtime) {
1498 			struct timeval now;
1499 			update_time_cache(base);
1500 			gettime(base, &now);
1501 			if (evutil_timercmp(&now, endtime, >=))
1502 				return count;
1503 		}
1504 		if (base->event_continue)
1505 			break;
1506 	}
1507 	return count;
1508 }
1509 
1510 /*
1511  * Active events are stored in priority queues.  Lower priorities are always
1512  * processed before higher priorities.  Low priority events can starve high
1513  * priority ones.
1514  */
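
/* Illustrative usage sketch: a caller that wants latency-sensitive callbacks
 * handled first can configure several queues and assign priorities explicitly
 * (urgent_ev and bulk_ev are hypothetical events on 'base'):
 *
 *	event_base_priority_init(base, 3);
 *	event_priority_set(urgent_ev, 0);	// runs before priorities 1 and 2
 *	event_priority_set(bulk_ev, 2);
 */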
1515 
1516 static int
1517 event_process_active(struct event_base *base)
1518 {
1519 	/* Caller must hold th_base_lock */
1520 	struct evcallback_list *activeq = NULL;
1521 	int i, c = 0;
1522 	const struct timeval *endtime;
1523 	struct timeval tv;
1524 	const int maxcb = base->max_dispatch_callbacks;
1525 	const int limit_after_prio = base->limit_callbacks_after_prio;
1526 	if (base->max_dispatch_time.tv_sec >= 0) {
1527 		update_time_cache(base);
1528 		gettime(base, &tv);
1529 		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1530 		endtime = &tv;
1531 	} else {
1532 		endtime = NULL;
1533 	}
1534 
1535 	for (i = 0; i < base->nactivequeues; ++i) {
1536 		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1537 			base->event_running_priority = i;
1538 			activeq = &base->activequeues[i];
1539 			if (i < limit_after_prio)
1540 				c = event_process_active_single_queue(base, activeq,
1541 				    INT_MAX, NULL);
1542 			else
1543 				c = event_process_active_single_queue(base, activeq,
1544 				    maxcb, endtime);
1545 			if (c < 0) {
1546 				goto done;
1547 			} else if (c > 0)
1548 				break; /* Processed a real event; do not
1549 					* consider lower-priority events */
1550 			/* If we get here, all of the events we processed
1551 			 * were internal.  Continue. */
1552 		}
1553 	}
1554 
1555 done:
1556 	base->event_running_priority = -1;
1557 
1558 	return c;
1559 }
1560 
1561 /*
1562  * Wait continuously for events.  We exit only if no events are left.
1563  */
1564 
1565 int
1566 event_dispatch(void)
1567 {
1568 	return (event_loop(0));
1569 }
1570 
1571 int
1572 event_base_dispatch(struct event_base *event_base)
1573 {
1574 	return (event_base_loop(event_base, 0));
1575 }
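
/* Illustrative usage sketch: the typical pattern is to add events and then
 * hand control to the loop:
 *
 *	struct event_base *base = event_base_new();
 *	// ... event_add() the events of interest ...
 *	event_base_dispatch(base);	// returns when no events remain, or on
 *					// loopbreak/loopexit
 *	event_base_free(base);
 */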
1576 
1577 const char *
1578 event_base_get_method(const struct event_base *base)
1579 {
1580 	EVUTIL_ASSERT(base);
1581 	return (base->evsel->name);
1582 }
1583 
1584 /** Callback: used to implement event_base_loopexit by telling the event_base
1585  * that it's time to exit its loop. */
1586 static void
1587 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1588 {
1589 	struct event_base *base = arg;
1590 	base->event_gotterm = 1;
1591 }
1592 
1593 int
1594 event_loopexit(const struct timeval *tv)
1595 {
1596 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1597 		    current_base, tv));
1598 }
1599 
1600 int
1601 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1602 {
1603 	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1604 		    event_base, tv));
1605 }
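
/* Illustrative example: ask the running loop to exit after ten more seconds:
 *
 *	struct timeval ten_sec = { 10, 0 };
 *	event_base_loopexit(base, &ten_sec);
 *
 * Passing NULL instead of a timeval makes the loop exit as soon as the
 * currently active callbacks have been processed. */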
1606 
1607 int
1608 event_loopbreak(void)
1609 {
1610 	return (event_base_loopbreak(current_base));
1611 }
1612 
1613 int
1614 event_base_loopbreak(struct event_base *event_base)
1615 {
1616 	int r = 0;
1617 	if (event_base == NULL)
1618 		return (-1);
1619 
1620 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1621 	event_base->event_break = 1;
1622 
1623 	if (EVBASE_NEED_NOTIFY(event_base)) {
1624 		r = evthread_notify_base(event_base);
1625 	} else {
1626 		r = (0);
1627 	}
1628 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1629 	return r;
1630 }
1631 
1632 int
1633 event_base_loopcontinue(struct event_base *event_base)
1634 {
1635 	int r = 0;
1636 	if (event_base == NULL)
1637 		return (-1);
1638 
1639 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1640 	event_base->event_continue = 1;
1641 
1642 	if (EVBASE_NEED_NOTIFY(event_base)) {
1643 		r = evthread_notify_base(event_base);
1644 	} else {
1645 		r = (0);
1646 	}
1647 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1648 	return r;
1649 }
1650 
1651 int
1652 event_base_got_break(struct event_base *event_base)
1653 {
1654 	int res;
1655 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1656 	res = event_base->event_break;
1657 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1658 	return res;
1659 }
1660 
1661 int
1662 event_base_got_exit(struct event_base *event_base)
1663 {
1664 	int res;
1665 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1666 	res = event_base->event_gotterm;
1667 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1668 	return res;
1669 }
1670 
1671 /* not thread safe */
1672 
1673 int
1674 event_loop(int flags)
1675 {
1676 	return event_base_loop(current_base, flags);
1677 }
1678 
1679 int
1680 event_base_loop(struct event_base *base, int flags)
1681 {
1682 	const struct eventop *evsel = base->evsel;
1683 	struct timeval tv;
1684 	struct timeval *tv_p;
1685 	int res, done, retval = 0;
1686 
1687 	/* Grab the lock.  We will release it inside evsel.dispatch, and again
1688 	 * as we invoke user callbacks. */
1689 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1690 
1691 	if (base->running_loop) {
1692 		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1693 		    " can run on each event_base at once.", __func__);
1694 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1695 		return -1;
1696 	}
1697 
1698 	base->running_loop = 1;
1699 
1700 	clear_time_cache(base);
1701 
1702 	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1703 		evsig_set_base_(base);
1704 
1705 	done = 0;
1706 
1707 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1708 	base->th_owner_id = EVTHREAD_GET_ID();
1709 #endif
1710 
1711 	base->event_gotterm = base->event_break = 0;
1712 
1713 	while (!done) {
1714 		base->event_continue = 0;
1715 		base->n_deferreds_queued = 0;
1716 
1717 		/* Terminate the loop if we have been asked to */
1718 		if (base->event_gotterm) {
1719 			break;
1720 		}
1721 
1722 		if (base->event_break) {
1723 			break;
1724 		}
1725 
1726 		tv_p = &tv;
1727 		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1728 			timeout_next(base, &tv_p);
1729 		} else {
1730 			/*
1731 			 * if we have active events, we just poll new events
1732 			 * without waiting.
1733 			 */
1734 			evutil_timerclear(&tv);
1735 		}
1736 
1737 		/* If we have no events, we just exit */
1738 		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1739 		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1740 			event_debug(("%s: no events registered.", __func__));
1741 			retval = 1;
1742 			goto done;
1743 		}
1744 
1745 		event_queue_make_later_events_active(base);
1746 
1747 		clear_time_cache(base);
1748 
1749 		res = evsel->dispatch(base, tv_p);
1750 
1751 		if (res == -1) {
1752 			event_debug(("%s: dispatch returned unsuccessfully.",
1753 				__func__));
1754 			retval = -1;
1755 			goto done;
1756 		}
1757 
1758 		update_time_cache(base);
1759 
1760 		timeout_process(base);
1761 
1762 		if (N_ACTIVE_CALLBACKS(base)) {
1763 			int n = event_process_active(base);
1764 			if ((flags & EVLOOP_ONCE)
1765 			    && N_ACTIVE_CALLBACKS(base) == 0
1766 			    && n != 0)
1767 				done = 1;
1768 		} else if (flags & EVLOOP_NONBLOCK)
1769 			done = 1;
1770 	}
1771 	event_debug(("%s: asked to terminate loop.", __func__));
1772 
1773 done:
1774 	clear_time_cache(base);
1775 	base->running_loop = 0;
1776 
1777 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1778 
1779 	return (retval);
1780 }
1781 
1782 /* One-time callback to implement event_base_once: invokes the user callback,
1783  * then deletes the allocated storage */
1784 static void
1785 event_once_cb(evutil_socket_t fd, short events, void *arg)
1786 {
1787 	struct event_once *eonce = arg;
1788 
1789 	(*eonce->cb)(fd, events, eonce->arg);
1790 	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
1791 	LIST_REMOVE(eonce, next_once);
1792 	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
1793 	event_debug_unassign(&eonce->ev);
1794 	mm_free(eonce);
1795 }
1796 
1797 /* not threadsafe, event scheduled once. */
1798 int
1799 event_once(evutil_socket_t fd, short events,
1800     void (*callback)(evutil_socket_t, short, void *),
1801     void *arg, const struct timeval *tv)
1802 {
1803 	return event_base_once(current_base, fd, events, callback, arg, tv);
1804 }
1805 
1806 /* Schedules an event once */
1807 int
1808 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1809     void (*callback)(evutil_socket_t, short, void *),
1810     void *arg, const struct timeval *tv)
1811 {
1812 	struct event_once *eonce;
1813 	int res = 0;
1814 	int activate = 0;
1815 
1816 	/* We cannot support signals that just fire once, or persistent
1817 	 * events. */
1818 	if (events & (EV_SIGNAL|EV_PERSIST))
1819 		return (-1);
1820 
1821 	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1822 		return (-1);
1823 
1824 	eonce->cb = callback;
1825 	eonce->arg = arg;
1826 
1827 	if (events == EV_TIMEOUT) {
1828 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1829 
1830 		if (tv == NULL || ! evutil_timerisset(tv)) {
1831 			/* If the event is going to become active immediately,
1832 			 * don't put it on the timeout queue.  This is one
1833 			 * idiom for scheduling a callback, so let's make
1834 			 * it fast (and order-preserving). */
1835 			activate = 1;
1836 		}
1837 	} else if (events & (EV_READ|EV_WRITE)) {
1838 		events &= EV_READ|EV_WRITE;
1839 
1840 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1841 	} else {
1842 		/* Bad event combination */
1843 		mm_free(eonce);
1844 		return (-1);
1845 	}
1846 
1847 	if (res == 0) {
1848 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1849 		if (activate)
1850 			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
1851 		else
1852 			res = event_add_nolock_(&eonce->ev, tv, 0);
1853 
1854 		if (res != 0) {
1855 			mm_free(eonce);
1856 			return (res);
1857 		} else {
1858 			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
1859 		}
1860 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1861 	}
1862 
1863 	return (0);
1864 }
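
/*
 * Usage sketch for event_base_once(), an illustration only and not part of
 * the library.  The identifiers one_shot_cb and one_sec are hypothetical;
 * only the libevent calls mirror the API defined above.
 *
 *   #include <stdio.h>
 *   #include <event2/event.h>
 *
 *   static void
 *   one_shot_cb(evutil_socket_t fd, short what, void *arg)
 *   {
 *   	(void)fd; (void)what;
 *   	puts((const char *)arg);	/+ runs exactly once; libevent frees its bookkeeping +/
 *   }
 *
 *   int
 *   main(void)
 *   {
 *   	struct event_base *base = event_base_new();
 *   	struct timeval one_sec = { 1, 0 };
 *   	/+ fd == -1 with EV_TIMEOUT: no descriptor, just a one-shot timer.
 *   	   EV_SIGNAL and EV_PERSIST would be rejected, as noted above. +/
 *   	event_base_once(base, -1, EV_TIMEOUT, one_shot_cb, "tick", &one_sec);
 *   	event_base_dispatch(base);
 *   	event_base_free(base);
 *   	return 0;
 *   }
 *
 * (Inner comments are written with /+ +/ only so this block nests cleanly.)
 */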
1865 
1866 int
1867 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
1868 {
1869 	if (!base)
1870 		base = current_base;
1871 	if (arg == &event_self_cbarg_ptr_)
1872 		arg = ev;
1873 
1874 	event_debug_assert_not_added_(ev);
1875 
1876 	ev->ev_base = base;
1877 
1878 	ev->ev_callback = callback;
1879 	ev->ev_arg = arg;
1880 	ev->ev_fd = fd;
1881 	ev->ev_events = events;
1882 	ev->ev_res = 0;
1883 	ev->ev_flags = EVLIST_INIT;
1884 	ev->ev_ncalls = 0;
1885 	ev->ev_pncalls = NULL;
1886 
1887 	if (events & EV_SIGNAL) {
1888 		if ((events & (EV_READ|EV_WRITE)) != 0) {
1889 			event_warnx("%s: EV_SIGNAL is not compatible with "
1890 			    "EV_READ or EV_WRITE", __func__);
1891 			return -1;
1892 		}
1893 		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
1894 	} else {
1895 		if (events & EV_PERSIST) {
1896 			evutil_timerclear(&ev->ev_io_timeout);
1897 			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
1898 		} else {
1899 			ev->ev_closure = EV_CLOSURE_EVENT;
1900 		}
1901 	}
1902 
1903 	min_heap_elem_init_(ev);
1904 
1905 	if (base != NULL) {
1906 		/* by default, we put new events into the middle priority */
1907 		ev->ev_pri = base->nactivequeues / 2;
1908 	}
1909 
1910 	event_debug_note_setup_(ev);
1911 
1912 	return 0;
1913 }
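
/*
 * Usage sketch for event_assign() (illustration only; struct conn, read_cb
 * and conn_start are hypothetical names).  Unlike event_new(), no allocation
 * happens here: the caller owns the storage, which is why
 * <event2/event_struct.h> is needed for the struct event definition.
 *
 *   #include <event2/event.h>
 *   #include <event2/event_struct.h>
 *
 *   struct conn {
 *   	struct event read_ev;		/+ embedded; no separate allocation +/
 *   	evutil_socket_t fd;
 *   };
 *
 *   static void read_cb(evutil_socket_t fd, short what, void *arg);
 *
 *   static int
 *   conn_start(struct event_base *base, struct conn *c)
 *   {
 *   	if (event_assign(&c->read_ev, base, c->fd,
 *   	    EV_READ|EV_PERSIST, read_cb, c) < 0)
 *   		return -1;
 *   	return event_add(&c->read_ev, NULL);	/+ NULL: no timeout +/
 *   }
 */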
1914 
1915 int
1916 event_base_set(struct event_base *base, struct event *ev)
1917 {
1918 	/* Only innocent events may be assigned to a different base */
1919 	if (ev->ev_flags != EVLIST_INIT)
1920 		return (-1);
1921 
1922 	event_debug_assert_is_setup_(ev);
1923 
1924 	ev->ev_base = base;
1925 	ev->ev_pri = base->nactivequeues/2;
1926 
1927 	return (0);
1928 }
1929 
1930 void
1931 event_set(struct event *ev, evutil_socket_t fd, short events,
1932 	  void (*callback)(evutil_socket_t, short, void *), void *arg)
1933 {
1934 	int r;
1935 	r = event_assign(ev, current_base, fd, events, callback, arg);
1936 	EVUTIL_ASSERT(r == 0);
1937 }
1938 
1939 void *
1940 event_self_cbarg(void)
1941 {
1942 	return &event_self_cbarg_ptr_;
1943 }
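
/*
 * Usage sketch for event_self_cbarg() (illustration only; self_freeing_cb is
 * a hypothetical name).  The returned cookie is recognized by event_assign()
 * above, which substitutes the event itself as the callback argument; this is
 * handy for a one-shot heap event that frees itself from its own callback.
 *
 *   static void
 *   self_freeing_cb(evutil_socket_t fd, short what, void *arg)
 *   {
 *   	struct event *me = arg;		/+ the event itself, via the cookie +/
 *   	(void)fd; (void)what;
 *   	event_free(me);
 *   }
 *
 *   /+ With a valid base and fd in scope: +/
 *   struct event *ev = event_new(base, fd, EV_READ,
 *       self_freeing_cb, event_self_cbarg());
 *   event_add(ev, NULL);
 */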
1944 
1945 struct event *
1946 event_base_get_running_event(struct event_base *base)
1947 {
1948 	struct event *ev = NULL;
1949 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1950 	if (EVBASE_IN_THREAD(base)) {
1951 		struct event_callback *evcb = base->current_event;
1952 		if (evcb->evcb_flags & EVLIST_INIT)
1953 			ev = event_callback_to_event(evcb);
1954 	}
1955 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1956 	return ev;
1957 }
1958 
1959 struct event *
1960 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
1961 {
1962 	struct event *ev;
1963 	ev = mm_malloc(sizeof(struct event));
1964 	if (ev == NULL)
1965 		return (NULL);
1966 	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
1967 		mm_free(ev);
1968 		return (NULL);
1969 	}
1970 
1971 	return (ev);
1972 }
1973 
1974 void
1975 event_free(struct event *ev)
1976 {
1977 	event_debug_assert_is_setup_(ev);
1978 
1979 	/* make sure that this event won't be coming back to haunt us. */
1980 	event_del(ev);
1981 	event_debug_note_teardown_(ev);
1982 	mm_free(ev);
1983 
1984 }
1985 
1986 void
1987 event_debug_unassign(struct event *ev)
1988 {
1989 	event_debug_assert_not_added_(ev);
1990 	event_debug_note_teardown_(ev);
1991 
1992 	ev->ev_flags &= ~EVLIST_INIT;
1993 }
1994 
1995 /*
1996  * Sets the priority of an event.  Changing the priority fails if the
1997  * event is already active.
1998  */
1999 
2000 int
2001 event_priority_set(struct event *ev, int pri)
2002 {
2003 	event_debug_assert_is_setup_(ev);
2004 
2005 	if (ev->ev_flags & EVLIST_ACTIVE)
2006 		return (-1);
2007 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2008 		return (-1);
2009 
2010 	ev->ev_pri = pri;
2011 
2012 	return (0);
2013 }
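
/*
 * Usage sketch for event_priority_set() (illustration only; urgent_ev and
 * bulk_ev are hypothetical, already-assigned events).  Lower numbers run
 * first, and the call fails once an event is on an active queue, so set the
 * priority right after assignment and before adding.
 *
 *   event_base_priority_init(base, 2);	/+ queue 0 (urgent) and queue 1 (bulk) +/
 *   event_priority_set(urgent_ev, 0);
 *   event_priority_set(bulk_ev, 1);
 *   event_add(urgent_ev, NULL);
 *   event_add(bulk_ev, NULL);
 */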
2014 
2015 /*
2016  * Checks if a specific event is pending or scheduled.
2017  */
2018 
2019 int
2020 event_pending(const struct event *ev, short event, struct timeval *tv)
2021 {
2022 	int flags = 0;
2023 
2024 	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2025 		event_warnx("%s: event has no event_base set.", __func__);
2026 		return 0;
2027 	}
2028 
2029 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2030 	event_debug_assert_is_setup_(ev);
2031 
2032 	if (ev->ev_flags & EVLIST_INSERTED)
2033 		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
2034 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2035 		flags |= ev->ev_res;
2036 	if (ev->ev_flags & EVLIST_TIMEOUT)
2037 		flags |= EV_TIMEOUT;
2038 
2039 	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
2040 
2041 	/* See if there is a timeout that we should report */
2042 	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2043 		struct timeval tmp = ev->ev_timeout;
2044 		tmp.tv_usec &= MICROSECONDS_MASK;
2045 		/* correctly remap to real time */
2046 		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2047 	}
2048 
2049 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2050 
2051 	return (flags & event);
2052 }
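
/*
 * Usage sketch for event_pending() (illustration only; ev is assumed to be a
 * timer event that was added earlier).  The return value is the subset of the
 * queried flags that are actually pending; when EV_TIMEOUT is queried and tv
 * is non-NULL, *tv receives the expiry time remapped to the real clock, as in
 * the code above.
 *
 *   struct timeval expires;
 *   if (event_pending(ev, EV_TIMEOUT, &expires))
 *   	printf("timer pending, expires at %ld.%06ld\n",
 *   	    (long)expires.tv_sec, (long)expires.tv_usec);
 *   else
 *   	printf("timer is not scheduled\n");
 */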
2053 
2054 int
2055 event_initialized(const struct event *ev)
2056 {
2057 	if (!(ev->ev_flags & EVLIST_INIT))
2058 		return 0;
2059 
2060 	return 1;
2061 }
2062 
2063 void
2064 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2065 {
2066 	event_debug_assert_is_setup_(event);
2067 
2068 	if (base_out)
2069 		*base_out = event->ev_base;
2070 	if (fd_out)
2071 		*fd_out = event->ev_fd;
2072 	if (events_out)
2073 		*events_out = event->ev_events;
2074 	if (callback_out)
2075 		*callback_out = event->ev_callback;
2076 	if (arg_out)
2077 		*arg_out = event->ev_arg;
2078 }
2079 
2080 size_t
2081 event_get_struct_event_size(void)
2082 {
2083 	return sizeof(struct event);
2084 }
2085 
2086 evutil_socket_t
2087 event_get_fd(const struct event *ev)
2088 {
2089 	event_debug_assert_is_setup_(ev);
2090 	return ev->ev_fd;
2091 }
2092 
2093 struct event_base *
2094 event_get_base(const struct event *ev)
2095 {
2096 	event_debug_assert_is_setup_(ev);
2097 	return ev->ev_base;
2098 }
2099 
2100 short
2101 event_get_events(const struct event *ev)
2102 {
2103 	event_debug_assert_is_setup_(ev);
2104 	return ev->ev_events;
2105 }
2106 
2107 event_callback_fn
2108 event_get_callback(const struct event *ev)
2109 {
2110 	event_debug_assert_is_setup_(ev);
2111 	return ev->ev_callback;
2112 }
2113 
2114 void *
2115 event_get_callback_arg(const struct event *ev)
2116 {
2117 	event_debug_assert_is_setup_(ev);
2118 	return ev->ev_arg;
2119 }
2120 
2121 int
2122 event_get_priority(const struct event *ev)
2123 {
2124 	event_debug_assert_is_setup_(ev);
2125 	return ev->ev_pri;
2126 }
2127 
2128 int
2129 event_add(struct event *ev, const struct timeval *tv)
2130 {
2131 	int res;
2132 
2133 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2134 		event_warnx("%s: event has no event_base set.", __func__);
2135 		return -1;
2136 	}
2137 
2138 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2139 
2140 	res = event_add_nolock_(ev, tv, 0);
2141 
2142 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2143 
2144 	return (res);
2145 }
2146 
2147 /* Helper function: wake an event_base from another thread.  This version
2148  * works by writing a byte to one end of a socketpair, so that the event_base
2149  * listening on the other end will wake up as the corresponding event
2150  * triggers */
2151 static int
2152 evthread_notify_base_default(struct event_base *base)
2153 {
2154 	char buf[1];
2155 	int r;
2156 	buf[0] = (char) 0;
2157 #ifdef _WIN32
2158 	r = send(base->th_notify_fd[1], buf, 1, 0);
2159 #else
2160 	r = write(base->th_notify_fd[1], buf, 1);
2161 #endif
2162 	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2163 }
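
/*
 * Background sketch of the wakeup idiom used here, written outside of
 * libevent (illustration only; wake_fds, waker and waiter are hypothetical).
 * One end of a pipe is watched by the thread blocked in poll(); any other
 * thread writes a single byte to the other end to wake it.
 *
 *   #include <poll.h>
 *   #include <unistd.h>
 *
 *   int wake_fds[2];			/+ set up earlier with pipe(wake_fds) +/
 *
 *   void waker(void)			/+ called from another thread +/
 *   {
 *   	char b = 0;
 *   	(void)write(wake_fds[1], &b, 1);
 *   }
 *
 *   void waiter(void)
 *   {
 *   	char buf[64];
 *   	struct pollfd pfd = { wake_fds[0], POLLIN, 0 };
 *   	poll(&pfd, 1, -1);		/+ returns once waker() has written +/
 *   	(void)read(wake_fds[0], buf, sizeof(buf));	/+ drain +/
 *   }
 */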
2164 
2165 #ifdef EVENT__HAVE_EVENTFD
2166 /* Helper function: wake an event_base from another thread.  This version
2167  * assumes that you have a working eventfd() implementation. */
2168 static int
2169 evthread_notify_base_eventfd(struct event_base *base)
2170 {
2171 	ev_uint64_t msg = 1;
2172 	int r;
2173 	do {
2174 		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2175 	} while (r < 0 && errno == EAGAIN);
2176 
2177 	return (r < 0) ? -1 : 0;
2178 }
2179 #endif
2180 
2181 
2182 /** Tell the thread currently running the event_loop for base (if any) that it
2183  * needs to stop waiting in its dispatch function (if it is) and process all
2184  * active callbacks. */
2185 static int
2186 evthread_notify_base(struct event_base *base)
2187 {
2188 	EVENT_BASE_ASSERT_LOCKED(base);
2189 	if (!base->th_notify_fn)
2190 		return -1;
2191 	if (base->is_notify_pending)
2192 		return 0;
2193 	base->is_notify_pending = 1;
2194 	return base->th_notify_fn(base);
2195 }
2196 
2197 /* Implementation function to remove a timeout on a currently pending event.
2198  */
2199 int
2200 event_remove_timer_nolock_(struct event *ev)
2201 {
2202 	struct event_base *base = ev->ev_base;
2203 
2204 	EVENT_BASE_ASSERT_LOCKED(base);
2205 	event_debug_assert_is_setup_(ev);
2206 
2207 	event_debug(("event_remove_timer_nolock: event: %p", ev));
2208 
2209 	/* If it's not pending on a timeout, we don't need to do anything. */
2210 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2211 		event_queue_remove_timeout(base, ev);
2212 		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2213 	}
2214 
2215 	return (0);
2216 }
2217 
2218 int
2219 event_remove_timer(struct event *ev)
2220 {
2221 	int res;
2222 
2223 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2224 		event_warnx("%s: event has no event_base set.", __func__);
2225 		return -1;
2226 	}
2227 
2228 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2229 
2230 	res = event_remove_timer_nolock_(ev);
2231 
2232 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2233 
2234 	return (res);
2235 }
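
/*
 * Usage sketch for event_remove_timer() (illustration only; ev and
 * thirty_secs are hypothetical).  Only the timeout is stripped; any
 * EV_READ/EV_WRITE registration made by event_add() stays in place.
 *
 *   event_add(ev, &thirty_secs);	/+ read event with an inactivity deadline +/
 *   /+ ... later, keep reading but drop the deadline: +/
 *   event_remove_timer(ev);
 */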
2236 
2237 /* Implementation function to add an event.  Works just like event_add,
2238  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2239  * we treat tv as an absolute time, not as an interval to add to the current
2240  * time */
2241 int
2242 event_add_nolock_(struct event *ev, const struct timeval *tv,
2243     int tv_is_absolute)
2244 {
2245 	struct event_base *base = ev->ev_base;
2246 	int res = 0;
2247 	int notify = 0;
2248 
2249 	EVENT_BASE_ASSERT_LOCKED(base);
2250 	event_debug_assert_is_setup_(ev);
2251 
2252 	event_debug((
2253 		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%scall %p",
2254 		 ev,
2255 		 EV_SOCK_ARG(ev->ev_fd),
2256 		 ev->ev_events & EV_READ ? "EV_READ " : " ",
2257 		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2258 		 tv ? "EV_TIMEOUT " : " ",
2259 		 ev->ev_callback));
2260 
2261 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2262 
2263 	/*
2264 	 * Prepare for timeout insertion further below; if any step
2265 	 * fails, we must not change any state.
2266 	 */
2267 	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2268 		if (min_heap_reserve_(&base->timeheap,
2269 			1 + min_heap_size_(&base->timeheap)) == -1)
2270 			return (-1);  /* ENOMEM == errno */
2271 	}
2272 
2273 	/* If the main thread is currently executing a signal event's
2274 	 * callback, and we are not the main thread, then we want to wait
2275 	 * until the callback is done before we mess with the event, or else
2276 	 * we can race on ev_ncalls and ev_pncalls below. */
2277 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2278 	if (base->current_event == event_to_event_callback(ev) &&
2279 	    (ev->ev_events & EV_SIGNAL)
2280 	    && !EVBASE_IN_THREAD(base)) {
2281 		++base->current_event_waiters;
2282 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2283 	}
2284 #endif
2285 
2286 	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
2287 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2288 		if (ev->ev_events & (EV_READ|EV_WRITE))
2289 			res = evmap_io_add_(base, ev->ev_fd, ev);
2290 		else if (ev->ev_events & EV_SIGNAL)
2291 			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2292 		if (res != -1)
2293 			event_queue_insert_inserted(base, ev);
2294 		if (res == 1) {
2295 			/* evmap says we need to notify the main thread. */
2296 			notify = 1;
2297 			res = 0;
2298 		}
2299 	}
2300 
2301 	/*
2302 	 * we should change the timeout state only if the previous event
2303 	 * addition succeeded.
2304 	 */
2305 	if (res != -1 && tv != NULL) {
2306 		struct timeval now;
2307 		int common_timeout;
2308 #ifdef USE_REINSERT_TIMEOUT
2309 		int was_common;
2310 		int old_timeout_idx;
2311 #endif
2312 
2313 		/*
2314 		 * for persistent timeout events, we remember the
2315 		 * timeout value and re-add the event.
2316 		 *
2317 		 * If tv_is_absolute, this was already set.
2318 		 */
2319 		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2320 			ev->ev_io_timeout = *tv;
2321 
2322 #ifndef USE_REINSERT_TIMEOUT
2323 		if (ev->ev_flags & EVLIST_TIMEOUT) {
2324 			event_queue_remove_timeout(base, ev);
2325 		}
2326 #endif
2327 
2328 		/* Check if it is active due to a timeout.  Rescheduling
2329 		 * this timeout before the callback can be executed
2330 		 * removes it from the active list. */
2331 		if ((ev->ev_flags & EVLIST_ACTIVE) &&
2332 		    (ev->ev_res & EV_TIMEOUT)) {
2333 			if (ev->ev_events & EV_SIGNAL) {
2334 				/* See if we are just active executing
2335 				 * this event in a loop
2336 				 */
2337 				if (ev->ev_ncalls && ev->ev_pncalls) {
2338 					/* Abort loop */
2339 					*ev->ev_pncalls = 0;
2340 				}
2341 			}
2342 
2343 			event_queue_remove_active(base, event_to_event_callback(ev));
2344 		}
2345 
2346 		gettime(base, &now);
2347 
2348 		common_timeout = is_common_timeout(tv, base);
2349 #ifdef USE_REINSERT_TIMEOUT
2350 		was_common = is_common_timeout(&ev->ev_timeout, base);
2351 		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2352 #endif
2353 
2354 		if (tv_is_absolute) {
2355 			ev->ev_timeout = *tv;
2356 		} else if (common_timeout) {
2357 			struct timeval tmp = *tv;
2358 			tmp.tv_usec &= MICROSECONDS_MASK;
2359 			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2360 			ev->ev_timeout.tv_usec |=
2361 			    (tv->tv_usec & ~MICROSECONDS_MASK);
2362 		} else {
2363 			evutil_timeradd(&now, tv, &ev->ev_timeout);
2364 		}
2365 
2366 		event_debug((
2367 			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2368 			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2369 
2370 #ifdef USE_REINSERT_TIMEOUT
2371 		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2372 #else
2373 		event_queue_insert_timeout(base, ev);
2374 #endif
2375 
2376 		if (common_timeout) {
2377 			struct common_timeout_list *ctl =
2378 			    get_common_timeout_list(base, &ev->ev_timeout);
2379 			if (ev == TAILQ_FIRST(&ctl->events)) {
2380 				common_timeout_schedule(ctl, &now, ev);
2381 			}
2382 		} else {
2383 			struct event* top = NULL;
2384 			/* See if the earliest timeout is now earlier than it
2385 			 * was before: if so, we will need to tell the main
2386 			 * thread to wake up earlier than it would otherwise.
2387 			 * We double check the timeout of the top element to
2388 			 * handle time distortions due to system suspension.
2389 			 */
2390 			if (min_heap_elt_is_top_(ev))
2391 				notify = 1;
2392 			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2393 					 evutil_timercmp(&top->ev_timeout, &now, <))
2394 				notify = 1;
2395 		}
2396 	}
2397 
2398 	/* if we are not in the right thread, we need to wake up the loop */
2399 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2400 		evthread_notify_base(base);
2401 
2402 	event_debug_note_add_(ev);
2403 
2404 	return (res);
2405 }
2406 
2407 int
2408 event_del(struct event *ev)
2409 {
2410 	int res;
2411 
2412 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2413 		event_warnx("%s: event has no event_base set.", __func__);
2414 		return -1;
2415 	}
2416 
2417 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2418 
2419 	res = event_del_nolock_(ev);
2420 
2421 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2422 
2423 	return (res);
2424 }
2425 
2426 /* Helper for event_del: always called with th_base_lock held. */
2427 int
2428 event_del_nolock_(struct event *ev)
2429 {
2430 	struct event_base *base;
2431 	int res = 0, notify = 0;
2432 
2433 	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2434 		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2435 
2436 	/* An event without a base has not been added */
2437 	if (ev->ev_base == NULL)
2438 		return (-1);
2439 
2440 	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2441 
2442 	/* If the main thread is currently executing this event's callback,
2443 	 * and we are not the main thread, then we want to wait until the
2444 	 * callback is done before we start removing the event.  That way,
2445 	 * when this function returns, it will be safe to free the
2446 	 * user-supplied argument. */
2447 	base = ev->ev_base;
2448 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2449 	if (base->current_event == event_to_event_callback(ev) &&
2450 	    !EVBASE_IN_THREAD(base)) {
2451 		++base->current_event_waiters;
2452 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2453 	}
2454 #endif
2455 
2456 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2457 
2458 	/* See if we are just active executing this event in a loop */
2459 	if (ev->ev_events & EV_SIGNAL) {
2460 		if (ev->ev_ncalls && ev->ev_pncalls) {
2461 			/* Abort loop */
2462 			*ev->ev_pncalls = 0;
2463 		}
2464 	}
2465 
2466 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2467 		/* NOTE: We never need to notify the main thread because of a
2468 		 * deleted timeout event: all that could happen if we don't is
2469 		 * that the dispatch loop might wake up too early.  But the
2470 		 * point of notifying the main thread _is_ to wake up the
2471 		 * dispatch loop early anyway, so we wouldn't gain anything by
2472 		 * doing it.
2473 		 */
2474 		event_queue_remove_timeout(base, ev);
2475 	}
2476 
2477 	if (ev->ev_flags & EVLIST_ACTIVE)
2478 		event_queue_remove_active(base, event_to_event_callback(ev));
2479 	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2480 		event_queue_remove_active_later(base, event_to_event_callback(ev));
2481 
2482 	if (ev->ev_flags & EVLIST_INSERTED) {
2483 		event_queue_remove_inserted(base, ev);
2484 		if (ev->ev_events & (EV_READ|EV_WRITE))
2485 			res = evmap_io_del_(base, ev->ev_fd, ev);
2486 		else
2487 			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2488 		if (res == 1) {
2489 			/* evmap says we need to notify the main thread. */
2490 			notify = 1;
2491 			res = 0;
2492 		}
2493 	}
2494 
2495 	/* if we are not in the right thread, we need to wake up the loop */
2496 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2497 		evthread_notify_base(base);
2498 
2499 	event_debug_note_del_(ev);
2500 
2501 	return (res);
2502 }
2503 
2504 void
2505 event_active(struct event *ev, int res, short ncalls)
2506 {
2507 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2508 		event_warnx("%s: event has no event_base set.", __func__);
2509 		return;
2510 	}
2511 
2512 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2513 
2514 	event_debug_assert_is_setup_(ev);
2515 
2516 	event_active_nolock_(ev, res, ncalls);
2517 
2518 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2519 }
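
/*
 * Usage sketch for event_active() (illustration only; worker_done_ev is a
 * hypothetical event owned by the loop thread).  Activating an event runs its
 * callback on the next loop iteration regardless of its fd or timeout; with
 * threading support enabled (for example via evthread_use_pthreads()), this
 * can be called from another thread and wakes the loop through the notify
 * mechanism above.  The ncalls argument only matters for signal events.
 *
 *   /+ In a worker thread, once a job finishes: +/
 *   event_active(worker_done_ev, EV_WRITE, 0);
 *   /+ The loop thread then runs worker_done_ev's callback with EV_WRITE set. +/
 */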
2520 
2521 
2522 void
2523 event_active_nolock_(struct event *ev, int res, short ncalls)
2524 {
2525 	struct event_base *base;
2526 
2527 	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2528 		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2529 
2530 	base = ev->ev_base;
2531 	EVENT_BASE_ASSERT_LOCKED(base);
2532 
2533 	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2534 	default:
2535 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2536 		EVUTIL_ASSERT(0);
2537 		break;
2538 	case EVLIST_ACTIVE:
2539 		/* We get different kinds of events, add them together */
2540 		ev->ev_res |= res;
2541 		return;
2542 	case EVLIST_ACTIVE_LATER:
2543 		ev->ev_res |= res;
2544 		break;
2545 	case 0:
2546 		ev->ev_res = res;
2547 		break;
2548 	}
2549 
2550 	if (ev->ev_pri < base->event_running_priority)
2551 		base->event_continue = 1;
2552 
2553 	if (ev->ev_events & EV_SIGNAL) {
2554 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2555 		if (base->current_event == event_to_event_callback(ev) &&
2556 		    !EVBASE_IN_THREAD(base)) {
2557 			++base->current_event_waiters;
2558 			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2559 		}
2560 #endif
2561 		ev->ev_ncalls = ncalls;
2562 		ev->ev_pncalls = NULL;
2563 	}
2564 
2565 	event_callback_activate_nolock_(base, event_to_event_callback(ev));
2566 }
2567 
2568 void
2569 event_active_later_(struct event *ev, int res)
2570 {
2571 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2572 	event_active_later_nolock_(ev, res);
2573 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2574 }
2575 
2576 void
2577 event_active_later_nolock_(struct event *ev, int res)
2578 {
2579 	struct event_base *base = ev->ev_base;
2580 	EVENT_BASE_ASSERT_LOCKED(base);
2581 
2582 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2583 		/* We get different kinds of events, add them together */
2584 		ev->ev_res |= res;
2585 		return;
2586 	}
2587 
2588 	ev->ev_res = res;
2589 
2590 	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2591 }
2592 
2593 int
2594 event_callback_activate_(struct event_base *base,
2595     struct event_callback *evcb)
2596 {
2597 	int r;
2598 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2599 	r = event_callback_activate_nolock_(base, evcb);
2600 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2601 	return r;
2602 }
2603 
2604 int
2605 event_callback_activate_nolock_(struct event_base *base,
2606     struct event_callback *evcb)
2607 {
2608 	int r = 1;
2609 
2610 	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2611 	default:
2612 		EVUTIL_ASSERT(0);
2613 	case EVLIST_ACTIVE_LATER:
2614 		event_queue_remove_active_later(base, evcb);
2615 		r = 0;
2616 		break;
2617 	case EVLIST_ACTIVE:
2618 		return 0;
2619 	case 0:
2620 		break;
2621 	}
2622 
2623 	event_queue_insert_active(base, evcb);
2624 
2625 	if (EVBASE_NEED_NOTIFY(base))
2626 		evthread_notify_base(base);
2627 
2628 	return r;
2629 }
2630 
2631 void
2632 event_callback_activate_later_nolock_(struct event_base *base,
2633     struct event_callback *evcb)
2634 {
2635 	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2636 		return;
2637 
2638 	event_queue_insert_active_later(base, evcb);
2639 	if (EVBASE_NEED_NOTIFY(base))
2640 		evthread_notify_base(base);
2641 }
2642 
2643 void
2644 event_callback_init_(struct event_base *base,
2645     struct event_callback *cb)
2646 {
2647 	memset(cb, 0, sizeof(*cb));
2648 	cb->evcb_pri = base->nactivequeues - 1;
2649 }
2650 
2651 int
2652 event_callback_cancel_(struct event_base *base,
2653     struct event_callback *evcb)
2654 {
2655 	int r;
2656 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2657 	r = event_callback_cancel_nolock_(base, evcb);
2658 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2659 	return r;
2660 }
2661 
2662 int
2663 event_callback_cancel_nolock_(struct event_base *base,
2664     struct event_callback *evcb)
2665 {
2666 	if (evcb->evcb_flags & EVLIST_INIT)
2667 		return event_del_nolock_(event_callback_to_event(evcb));
2668 
2669 	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2670 	default:
2671 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2672 		EVUTIL_ASSERT(0);
2673 		break;
2674 	case EVLIST_ACTIVE:
2675 		/* It is on an active queue right now; just remove it. */
2676 		event_queue_remove_active(base, evcb);
2677 		return 0;
2678 	case EVLIST_ACTIVE_LATER:
2679 		event_queue_remove_active_later(base, evcb);
2680 		break;
2681 	case 0:
2682 		break;
2683 	}
2684 
2685 	event_base_assert_ok_nolock_(base);
2686 
2687 	return 0;
2688 }
2689 
2690 void
2691 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
2692 {
2693 	memset(cb, 0, sizeof(*cb));
2694 	cb->evcb_cb_union.evcb_selfcb = fn;
2695 	cb->evcb_arg = arg;
2696 	cb->evcb_pri = priority;
2697 	cb->evcb_closure = EV_CLOSURE_CB_SELF;
2698 }
2699 
2700 void
2701 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
2702 {
2703 	cb->evcb_pri = priority;
2704 }
2705 
2706 void
2707 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
2708 {
2709 	if (!base)
2710 		base = current_base;
2711 	event_callback_cancel_(base, cb);
2712 }
2713 
2714 #define MAX_DEFERREDS_QUEUED 32
2715 int
2716 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
2717 {
2718 	int r = 1;
2719 	if (!base)
2720 		base = current_base;
2721 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2722 	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
2723 		event_callback_activate_later_nolock_(base, cb);
2724 	} else {
2725 		++base->n_deferreds_queued;
2726 		r = event_callback_activate_nolock_(base, cb);
2727 	}
2728 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2729 	return r;
2730 }
2731 
2732 static int
2733 timeout_next(struct event_base *base, struct timeval **tv_p)
2734 {
2735 	/* Caller must hold th_base_lock */
2736 	struct timeval now;
2737 	struct event *ev;
2738 	struct timeval *tv = *tv_p;
2739 	int res = 0;
2740 
2741 	ev = min_heap_top_(&base->timeheap);
2742 
2743 	if (ev == NULL) {
2744 		/* if no time-based events are pending, just wait for I/O */
2745 		*tv_p = NULL;
2746 		goto out;
2747 	}
2748 
2749 	if (gettime(base, &now) == -1) {
2750 		res = -1;
2751 		goto out;
2752 	}
2753 
2754 	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
2755 		evutil_timerclear(tv);
2756 		goto out;
2757 	}
2758 
2759 	evutil_timersub(&ev->ev_timeout, &now, tv);
2760 
2761 	EVUTIL_ASSERT(tv->tv_sec >= 0);
2762 	EVUTIL_ASSERT(tv->tv_usec >= 0);
2763 	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
2764 
2765 out:
2766 	return (res);
2767 }
2768 
2769 /* Activate every event whose timeout has elapsed. */
2770 static void
2771 timeout_process(struct event_base *base)
2772 {
2773 	/* Caller must hold lock. */
2774 	struct timeval now;
2775 	struct event *ev;
2776 
2777 	if (min_heap_empty_(&base->timeheap)) {
2778 		return;
2779 	}
2780 
2781 	gettime(base, &now);
2782 
2783 	while ((ev = min_heap_top_(&base->timeheap))) {
2784 		if (evutil_timercmp(&ev->ev_timeout, &now, >))
2785 			break;
2786 
2787 		/* delete this event from the I/O queues */
2788 		event_del_nolock_(ev);
2789 
2790 		event_debug(("timeout_process: event: %p, call %p",
2791 			 ev, ev->ev_callback));
2792 		event_active_nolock_(ev, EV_TIMEOUT, 1);
2793 	}
2794 }
2795 
2796 #if (EVLIST_INTERNAL >> 4) != 1
2797 #error "Mismatch for value of EVLIST_INTERNAL"
2798 #endif
2799 /* These are a fancy way to spell
2800      if (!(flags & EVLIST_INTERNAL))
2801          base->event_count--/++;
2802 */
2803 #define DECR_EVENT_COUNT(base,flags) \
2804 	((base)->event_count -= (~((flags) >> 4) & 1))
2805 #define INCR_EVENT_COUNT(base,flags) \
2806 	((base)->event_count += (~((flags) >> 4) & 1))
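
/*
 * Worked example of the branch-free form above, relying on the #if check
 * that EVLIST_INTERNAL >> 4 == 1 (i.e. the flag is bit 4, value 0x10):
 *
 *   EVLIST_INTERNAL clear:  (~(flags >> 4) & 1) == 1  -> count changes by 1
 *   EVLIST_INTERNAL set:    (~(flags >> 4) & 1) == 0  -> count is unchanged
 *
 * Other flag bits shifted down by 4 are masked off by the "& 1", so only the
 * internal bit decides; internal events such as th_notify never show up in
 * base->event_count.
 */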
2807 
2808 static void
2809 event_queue_remove_inserted(struct event_base *base, struct event *ev)
2810 {
2811 	EVENT_BASE_ASSERT_LOCKED(base);
2812 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
2813 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
2814 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
2815 		return;
2816 	}
2817 	DECR_EVENT_COUNT(base, ev->ev_flags);
2818 	ev->ev_flags &= ~EVLIST_INSERTED;
2819 }
2820 static void
2821 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
2822 {
2823 	EVENT_BASE_ASSERT_LOCKED(base);
2824 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
2825 		event_errx(1, "%s: %p not on queue %x", __func__,
2826 			   evcb, EVLIST_ACTIVE);
2827 		return;
2828 	}
2829 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
2830 	evcb->evcb_flags &= ~EVLIST_ACTIVE;
2831 	base->event_count_active--;
2832 
2833 	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
2834 	    evcb, evcb_active_next);
2835 }
2836 static void
2837 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
2838 {
2839 	EVENT_BASE_ASSERT_LOCKED(base);
2840 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
2841 		event_errx(1, "%s: %p not on queue %x", __func__,
2842 			   evcb, EVLIST_ACTIVE_LATER);
2843 		return;
2844 	}
2845 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
2846 	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
2847 	base->event_count_active--;
2848 
2849 	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
2850 }
2851 static void
2852 event_queue_remove_timeout(struct event_base *base, struct event *ev)
2853 {
2854 	EVENT_BASE_ASSERT_LOCKED(base);
2855 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
2856 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
2857 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
2858 		return;
2859 	}
2860 	DECR_EVENT_COUNT(base, ev->ev_flags);
2861 	ev->ev_flags &= ~EVLIST_TIMEOUT;
2862 
2863 	if (is_common_timeout(&ev->ev_timeout, base)) {
2864 		struct common_timeout_list *ctl =
2865 		    get_common_timeout_list(base, &ev->ev_timeout);
2866 		TAILQ_REMOVE(&ctl->events, ev,
2867 		    ev_timeout_pos.ev_next_with_common_timeout);
2868 	} else {
2869 		min_heap_erase_(&base->timeheap, ev);
2870 	}
2871 }
2872 
2873 #ifdef USE_REINSERT_TIMEOUT
2874 /* Remove and reinsert 'ev' into the timeout queue. */
2875 static void
2876 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
2877     int was_common, int is_common, int old_timeout_idx)
2878 {
2879 	struct common_timeout_list *ctl;
2880 	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
2881 		event_queue_insert_timeout(base, ev);
2882 		return;
2883 	}
2884 
2885 	switch ((was_common<<1) | is_common) {
2886 	case 3: /* Changing from one common timeout to another */
2887 		ctl = base->common_timeout_queues[old_timeout_idx];
2888 		TAILQ_REMOVE(&ctl->events, ev,
2889 		    ev_timeout_pos.ev_next_with_common_timeout);
2890 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
2891 		insert_common_timeout_inorder(ctl, ev);
2892 		break;
2893 	case 2: /* Was common; is no longer common */
2894 		ctl = base->common_timeout_queues[old_timeout_idx];
2895 		TAILQ_REMOVE(&ctl->events, ev,
2896 		    ev_timeout_pos.ev_next_with_common_timeout);
2897 		min_heap_push_(&base->timeheap, ev);
2898 		break;
2899 	case 1: /* Wasn't common; has become common. */
2900 		min_heap_erase_(&base->timeheap, ev);
2901 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
2902 		insert_common_timeout_inorder(ctl, ev);
2903 		break;
2904 	case 0: /* was in heap; is still on heap. */
2905 		min_heap_adjust_(&base->timeheap, ev);
2906 		break;
2907 	default:
2908 		EVUTIL_ASSERT(0); /* unreachable */
2909 		break;
2910 	}
2911 }
2912 #endif
2913 
2914 /* Add 'ev' to the common-timeout list 'ctl', keeping the list sorted by timeout. */
2915 static void
2916 insert_common_timeout_inorder(struct common_timeout_list *ctl,
2917     struct event *ev)
2918 {
2919 	struct event *e;
2920 	/* By all logic, we should just be able to append 'ev' to the end of
2921 	 * ctl->events, since the timeout on each 'ev' is set to {the common
2922 	 * timeout} + {the time when we add the event}, and so the events
2923 	 * should arrive in order of their timeeouts.  But just in case
2924  * should arrive in order of their timeouts.  But just in case
2925  * there's some wacky threading issue going on, we do a search from
2926  * the end of ctl->events to find the right insertion point.
2927 	TAILQ_FOREACH_REVERSE(e, &ctl->events,
2928 	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
2929 		/* This timercmp is a little sneaky, since both ev and e have
2930 		 * magic values in tv_usec.  Fortunately, they ought to have
2931 		 * the _same_ magic values in tv_usec.  Let's assert for that.
2932 		 */
2933 		EVUTIL_ASSERT(
2934 			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
2935 		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
2936 			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
2937 			    ev_timeout_pos.ev_next_with_common_timeout);
2938 			return;
2939 		}
2940 	}
2941 	TAILQ_INSERT_HEAD(&ctl->events, ev,
2942 	    ev_timeout_pos.ev_next_with_common_timeout);
2943 }
2944 
2945 static void
2946 event_queue_insert_inserted(struct event_base *base, struct event *ev)
2947 {
2948 	EVENT_BASE_ASSERT_LOCKED(base);
2949 
2950 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
2951 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
2952 		    ev, EV_SOCK_ARG(ev->ev_fd));
2953 		return;
2954 	}
2955 
2956 	INCR_EVENT_COUNT(base, ev->ev_flags);
2957 
2958 	ev->ev_flags |= EVLIST_INSERTED;
2959 }
2960 
2961 static void
2962 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
2963 {
2964 	EVENT_BASE_ASSERT_LOCKED(base);
2965 
2966 	if (evcb->evcb_flags & EVLIST_ACTIVE) {
2967 		/* Double insertion is possible for active events */
2968 		return;
2969 	}
2970 
2971 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
2972 
2973 	evcb->evcb_flags |= EVLIST_ACTIVE;
2974 
2975 	base->event_count_active++;
2976 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
2977 	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
2978 	    evcb, evcb_active_next);
2979 }
2980 
2981 static void
2982 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
2983 {
2984 	EVENT_BASE_ASSERT_LOCKED(base);
2985 	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
2986 		/* Double insertion is possible */
2987 		return;
2988 	}
2989 
2990 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
2991 	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
2992 	base->event_count_active++;
2993 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
2994 	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
2995 }
2996 
2997 static void
2998 event_queue_insert_timeout(struct event_base *base, struct event *ev)
2999 {
3000 	EVENT_BASE_ASSERT_LOCKED(base);
3001 
3002 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3003 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3004 		    ev, EV_SOCK_ARG(ev->ev_fd));
3005 		return;
3006 	}
3007 
3008 	INCR_EVENT_COUNT(base, ev->ev_flags);
3009 
3010 	ev->ev_flags |= EVLIST_TIMEOUT;
3011 
3012 	if (is_common_timeout(&ev->ev_timeout, base)) {
3013 		struct common_timeout_list *ctl =
3014 		    get_common_timeout_list(base, &ev->ev_timeout);
3015 		insert_common_timeout_inorder(ctl, ev);
3016 	} else {
3017 		min_heap_push_(&base->timeheap, ev);
3018 	}
3019 }
3020 
3021 static void
3022 event_queue_make_later_events_active(struct event_base *base)
3023 {
3024 	struct event_callback *evcb;
3025 	EVENT_BASE_ASSERT_LOCKED(base);
3026 
3027 	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3028 		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3029 		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3030 		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3031 		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3032 		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3033 	}
3034 }
3035 
3036 /* Functions for debugging */
3037 
3038 const char *
3039 event_get_version(void)
3040 {
3041 	return (EVENT__VERSION);
3042 }
3043 
3044 ev_uint32_t
3045 event_get_version_number(void)
3046 {
3047 	return (EVENT__NUMERIC_VERSION);
3048 }
3049 
3050 /*
3051  * No thread-safe interface needed - the information should be the same
3052  * for all threads.
3053  */
3054 
3055 const char *
3056 event_get_method(void)
3057 {
3058 	return (current_base->evsel->name);
3059 }
3060 
3061 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3062 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3063 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3064 static void (*mm_free_fn_)(void *p) = NULL;
3065 
3066 void *
3067 event_mm_malloc_(size_t sz)
3068 {
3069 	if (sz == 0)
3070 		return NULL;
3071 
3072 	if (mm_malloc_fn_)
3073 		return mm_malloc_fn_(sz);
3074 	else
3075 		return malloc(sz);
3076 }
3077 
3078 void *
3079 event_mm_calloc_(size_t count, size_t size)
3080 {
3081 	if (count == 0 || size == 0)
3082 		return NULL;
3083 
3084 	if (mm_malloc_fn_) {
3085 		size_t sz = count * size;
3086 		void *p = NULL;
3087 		if (count > EV_SIZE_MAX / size)
3088 			goto error;
3089 		p = mm_malloc_fn_(sz);
3090 		if (p)
3091 			return memset(p, 0, sz);
3092 	} else {
3093 		void *p = calloc(count, size);
3094 #ifdef _WIN32
3095 		/* Windows calloc doesn't reliably set ENOMEM */
3096 		if (p == NULL)
3097 			goto error;
3098 #endif
3099 		return p;
3100 	}
3101 
3102 error:
3103 	errno = ENOMEM;
3104 	return NULL;
3105 }
3106 
3107 char *
3108 event_mm_strdup_(const char *str)
3109 {
3110 	if (!str) {
3111 		errno = EINVAL;
3112 		return NULL;
3113 	}
3114 
3115 	if (mm_malloc_fn_) {
3116 		size_t ln = strlen(str);
3117 		void *p = NULL;
3118 		if (ln == EV_SIZE_MAX)
3119 			goto error;
3120 		p = mm_malloc_fn_(ln+1);
3121 		if (p)
3122 			return memcpy(p, str, ln+1);
3123 	} else
3124 #ifdef _WIN32
3125 		return _strdup(str);
3126 #else
3127 		return strdup(str);
3128 #endif
3129 
3130 error:
3131 	errno = ENOMEM;
3132 	return NULL;
3133 }
3134 
3135 void *
3136 event_mm_realloc_(void *ptr, size_t sz)
3137 {
3138 	if (mm_realloc_fn_)
3139 		return mm_realloc_fn_(ptr, sz);
3140 	else
3141 		return realloc(ptr, sz);
3142 }
3143 
3144 void
3145 event_mm_free_(void *ptr)
3146 {
3147 	if (mm_free_fn_)
3148 		mm_free_fn_(ptr);
3149 	else
3150 		free(ptr);
3151 }
3152 
3153 void
3154 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3155 			void *(*realloc_fn)(void *ptr, size_t sz),
3156 			void (*free_fn)(void *ptr))
3157 {
3158 	mm_malloc_fn_ = malloc_fn;
3159 	mm_realloc_fn_ = realloc_fn;
3160 	mm_free_fn_ = free_fn;
3161 }
3162 #endif
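
/*
 * Usage sketch for event_set_mem_functions() (illustration only; the counting
 * wrappers are hypothetical, and the interface exists only when the library
 * is built without EVENT__DISABLE_MM_REPLACEMENT).  Replacements must be
 * installed before the first libevent allocation and must behave like
 * malloc/realloc/free.
 *
 *   #include <stdlib.h>
 *   #include <event2/event.h>
 *
 *   static size_t live_allocs;	/+ rough, illustrative counter only +/
 *
 *   static void *counting_malloc(size_t sz) { live_allocs++; return malloc(sz); }
 *   static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *   static void counting_free(void *p) { if (p) live_allocs--; free(p); }
 *
 *   int
 *   main(void)
 *   {
 *   	event_set_mem_functions(counting_malloc, counting_realloc, counting_free);
 *   	struct event_base *base = event_base_new();	/+ now uses the wrappers +/
 *   	event_base_free(base);
 *   	return 0;
 *   }
 */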
3163 
3164 #ifdef EVENT__HAVE_EVENTFD
3165 static void
3166 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3167 {
3168 	ev_uint64_t msg;
3169 	ev_ssize_t r;
3170 	struct event_base *base = arg;
3171 
3172 	r = read(fd, (void*) &msg, sizeof(msg));
3173 	if (r<0 && errno != EAGAIN) {
3174 		event_sock_warn(fd, "Error reading from eventfd");
3175 	}
3176 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3177 	base->is_notify_pending = 0;
3178 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3179 }
3180 #endif
3181 
3182 static void
3183 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3184 {
3185 	unsigned char buf[1024];
3186 	struct event_base *base = arg;
3187 #ifdef _WIN32
3188 	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3189 		;
3190 #else
3191 	while (read(fd, (char*)buf, sizeof(buf)) > 0)
3192 		;
3193 #endif
3194 
3195 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3196 	base->is_notify_pending = 0;
3197 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3198 }
3199 
3200 int
3201 evthread_make_base_notifiable(struct event_base *base)
3202 {
3203 	int r;
3204 	if (!base)
3205 		return -1;
3206 
3207 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3208 	r = evthread_make_base_notifiable_nolock_(base);
3209 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3210 	return r;
3211 }
3212 
3213 static int
3214 evthread_make_base_notifiable_nolock_(struct event_base *base)
3215 {
3216 	void (*cb)(evutil_socket_t, short, void *);
3217 	int (*notify)(struct event_base *);
3218 
3219 	if (base->th_notify_fn != NULL) {
3220 		/* The base is already notifiable: we're doing fine. */
3221 		return 0;
3222 	}
3223 
3224 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3225 	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3226 		base->th_notify_fn = event_kq_notify_base_;
3227 		/* No need to add an event here; the backend can wake
3228 		 * itself up just fine. */
3229 		return 0;
3230 	}
3231 #endif
3232 
3233 #ifdef EVENT__HAVE_EVENTFD
3234 	base->th_notify_fd[0] = evutil_eventfd_(0,
3235 	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3236 	if (base->th_notify_fd[0] >= 0) {
3237 		base->th_notify_fd[1] = -1;
3238 		notify = evthread_notify_base_eventfd;
3239 		cb = evthread_notify_drain_eventfd;
3240 	} else
3241 #endif
3242 	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3243 		notify = evthread_notify_base_default;
3244 		cb = evthread_notify_drain_default;
3245 	} else {
3246 		return -1;
3247 	}
3248 
3249 	base->th_notify_fn = notify;
3250 
3251 	/* prepare an event that we can use for wakeup */
3252 	event_assign(&base->th_notify, base, base->th_notify_fd[0],
3253 				 EV_READ|EV_PERSIST, cb, base);
3254 
3255 	/* we need to mark this as an internal event */
3256 	base->th_notify.ev_flags |= EVLIST_INTERNAL;
3257 	event_priority_set(&base->th_notify, 0);
3258 
3259 	return event_add_nolock_(&base->th_notify, NULL, 0);
3260 }
3261 
3262 int
3263 event_base_foreach_event_nolock_(struct event_base *base,
3264     event_base_foreach_event_cb fn, void *arg)
3265 {
3266 	int r, i;
3267 	unsigned u;
3268 	struct event *ev;
3269 
3270 	/* Start out with all the EVLIST_INSERTED events. */
3271 	if ((r = evmap_foreach_event_(base, fn, arg)))
3272 		return r;
3273 
3274 	/* Okay, now we deal with those events that have timeouts and are in
3275 	 * the min-heap. */
3276 	for (u = 0; u < base->timeheap.n; ++u) {
3277 		ev = base->timeheap.p[u];
3278 		if (ev->ev_flags & EVLIST_INSERTED) {
3279 			/* we already processed this one */
3280 			continue;
3281 		}
3282 		if ((r = fn(base, ev, arg)))
3283 			return r;
3284 	}
3285 
3286 	/* Now for the events in the common-timeout queues: these live in
3287 	 * per-timeout lists rather than in the min-heap. */
3288 	for (i = 0; i < base->n_common_timeouts; ++i) {
3289 		struct common_timeout_list *ctl =
3290 		    base->common_timeout_queues[i];
3291 		TAILQ_FOREACH(ev, &ctl->events,
3292 		    ev_timeout_pos.ev_next_with_common_timeout) {
3293 			if (ev->ev_flags & EVLIST_INSERTED) {
3294 				/* we already processed this one */
3295 				continue;
3296 			}
3297 			if ((r = fn(base, ev, arg)))
3298 				return r;
3299 		}
3300 	}
3301 
3302 	/* Finally, we deal with all the active events that we haven't touched
3303 	 * yet. */
3304 	for (i = 0; i < base->nactivequeues; ++i) {
3305 		struct event_callback *evcb;
3306 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3307 			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3308 				/* This isn't an event (EVLIST_INIT clear), or
3309 				 * we already processed it (EVLIST_INSERTED or
3310 				 * EVLIST_TIMEOUT set). */
3311 				continue;
3312 			}
3313 			ev = event_callback_to_event(evcb);
3314 			if ((r = fn(base, ev, arg)))
3315 				return r;
3316 		}
3317 	}
3318 
3319 	return 0;
3320 }
3321 
3322 /* Helper for event_base_dump_events: called on each event in the event base;
3323  * dumps only the inserted events. */
3324 static int
3325 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3326 {
3327 	FILE *output = arg;
3328 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3329 	    "sig" : "fd ";
3330 
3331 	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3332 		return 0;
3333 
3334 	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s",
3335 	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3336 	    (e->ev_events&EV_READ)?" Read":"",
3337 	    (e->ev_events&EV_WRITE)?" Write":"",
3338 	    (e->ev_events&EV_SIGNAL)?" Signal":"",
3339 	    (e->ev_events&EV_PERSIST)?" Persist":"",
3340 	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3341 	if (e->ev_flags & EVLIST_TIMEOUT) {
3342 		struct timeval tv;
3343 		tv.tv_sec = e->ev_timeout.tv_sec;
3344 		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3345 		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3346 		fprintf(output, " Timeout=%ld.%06d",
3347 		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3348 	}
3349 	fputc('\n', output);
3350 
3351 	return 0;
3352 }
3353 
3354 /* Helper for event_base_dump_events: called on each event in the event base;
3355  * dumps only the active events. */
3356 static int
3357 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3358 {
3359 	FILE *output = arg;
3360 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3361 	    "sig" : "fd ";
3362 
3363 	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3364 		return 0;
3365 
3366 	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s active%s%s\n",
3367 	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3368 	    (e->ev_res&EV_READ)?" Read":"",
3369 	    (e->ev_res&EV_WRITE)?" Write":"",
3370 	    (e->ev_res&EV_SIGNAL)?" Signal":"",
3371 	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3372 	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3373 	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3374 
3375 	return 0;
3376 }
3377 
3378 int
3379 event_base_foreach_event(struct event_base *base,
3380     event_base_foreach_event_cb fn, void *arg)
3381 {
3382 	int r;
3383 	if ((!fn) || (!base)) {
3384 		return -1;
3385 	}
3386 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3387 	r = event_base_foreach_event_nolock_(base, fn, arg);
3388 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3389 	return r;
3390 }
3391 
3392 
3393 void
3394 event_base_dump_events(struct event_base *base, FILE *output)
3395 {
3396 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3397 	fprintf(output, "Inserted events:\n");
3398 	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3399 
3400 	fprintf(output, "Active events:\n");
3401 	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3402 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3403 }
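
/*
 * Usage sketch (illustration only): a quick debugging aid that prints the
 * inserted and active events of a base to a stdio stream.
 *
 *   event_base_dump_events(base, stderr);	/+ or any FILE * open for writing +/
 */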
3404 
3405 void
3406 event_base_add_virtual_(struct event_base *base)
3407 {
3408 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3409 	base->virtual_event_count++;
3410 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3411 }
3412 
3413 void
3414 event_base_del_virtual_(struct event_base *base)
3415 {
3416 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3417 	EVUTIL_ASSERT(base->virtual_event_count > 0);
3418 	base->virtual_event_count--;
3419 	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3420 		evthread_notify_base(base);
3421 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3422 }
3423 
3424 static void
3425 event_free_debug_globals_locks(void)
3426 {
3427 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3428 #ifndef EVENT__DISABLE_DEBUG_MODE
3429 	if (event_debug_map_lock_ != NULL) {
3430 		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3431 		event_debug_map_lock_ = NULL;
3432 	}
3433 #endif /* EVENT__DISABLE_DEBUG_MODE */
3434 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3435 	return;
3436 }
3437 
3438 static void
3439 event_free_debug_globals(void)
3440 {
3441 	event_free_debug_globals_locks();
3442 }
3443 
3444 static void
3445 event_free_evsig_globals(void)
3446 {
3447 	evsig_free_globals_();
3448 }
3449 
3450 static void
3451 event_free_evutil_globals(void)
3452 {
3453 	evutil_free_globals_();
3454 }
3455 
3456 static void
3457 event_free_globals(void)
3458 {
3459 	event_free_debug_globals();
3460 	event_free_evsig_globals();
3461 	event_free_evutil_globals();
3462 }
3463 
3464 void
3465 libevent_global_shutdown(void)
3466 {
3467 	event_free_globals();
3468 }
3469 
3470 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3471 int
3472 event_global_setup_locks_(const int enable_locks)
3473 {
3474 #ifndef EVENT__DISABLE_DEBUG_MODE
3475 	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3476 #endif
3477 	if (evsig_global_setup_locks_(enable_locks) < 0)
3478 		return -1;
3479 	if (evutil_global_setup_locks_(enable_locks) < 0)
3480 		return -1;
3481 	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3482 		return -1;
3483 	return 0;
3484 }
3485 #endif
3486 
3487 void
3488 event_base_assert_ok_(struct event_base *base)
3489 {
3490 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3491 	event_base_assert_ok_nolock_(base);
3492 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3493 }
3494 
3495 void
3496 event_base_assert_ok_nolock_(struct event_base *base)
3497 {
3498 	int i;
3499 	int count;
3500 
3501 	/* First do checks on the per-fd and per-signal lists */
3502 	evmap_check_integrity_(base);
3503 
3504 	/* Check the heap property */
3505 	for (i = 1; i < (int)base->timeheap.n; ++i) {
3506 		int parent = (i - 1) / 2;
3507 		struct event *ev, *p_ev;
3508 		ev = base->timeheap.p[i];
3509 		p_ev = base->timeheap.p[parent];
3510 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3511 		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3512 		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3513 	}
3514 
3515 	/* Check that the common timeouts are fine */
3516 	for (i = 0; i < base->n_common_timeouts; ++i) {
3517 		struct common_timeout_list *ctl = base->common_timeout_queues[i];
3518 		struct event *last=NULL, *ev;
3519 
3520 		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3521 
3522 		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3523 			if (last)
3524 				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3525 			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3526 			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3527 			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3528 			last = ev;
3529 		}
3530 	}
3531 
3532 	/* Check the active queues. */
3533 	count = 0;
3534 	for (i = 0; i < base->nactivequeues; ++i) {
3535 		struct event_callback *evcb;
3536 		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
3537 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3538 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
3539 			EVUTIL_ASSERT(evcb->evcb_pri == i);
3540 			++count;
3541 		}
3542 	}
3543 
3544 	{
3545 		struct event_callback *evcb;
3546 		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
3547 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
3548 			++count;
3549 		}
3550 	}
3551 	EVUTIL_ASSERT(count == base->event_count_active);
3552 }
3553