xref: /netbsd-src/external/bsd/ntp/dist/sntp/libevent/event.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: event.c,v 1.5 2016/01/08 21:35:40 christos Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
5  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #include "event2/event-config.h"
30 #include "evconfig-private.h"
31 
32 #ifdef _WIN32
33 #include <winsock2.h>
34 #define WIN32_LEAN_AND_MEAN
35 #include <windows.h>
36 #undef WIN32_LEAN_AND_MEAN
37 #endif
38 #include <sys/types.h>
39 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
40 #include <sys/time.h>
41 #endif
42 #include <sys/queue.h>
43 #ifdef EVENT__HAVE_SYS_SOCKET_H
44 #include <sys/socket.h>
45 #endif
46 #include <stdio.h>
47 #include <stdlib.h>
48 #ifdef EVENT__HAVE_UNISTD_H
49 #include <unistd.h>
50 #endif
51 #include <ctype.h>
52 #include <errno.h>
53 #include <signal.h>
54 #include <string.h>
55 #include <time.h>
56 #include <limits.h>
57 
58 #include "event2/event.h"
59 #include "event2/event_struct.h"
60 #include "event2/event_compat.h"
61 #include "event-internal.h"
62 #include "defer-internal.h"
63 #include "evthread-internal.h"
64 #include "event2/thread.h"
65 #include "event2/util.h"
66 #include "log-internal.h"
67 #include "evmap-internal.h"
68 #include "iocp-internal.h"
69 #include "changelist-internal.h"
70 #define HT_NO_CACHE_HASH_VALUES
71 #include "ht-internal.h"
72 #include "util-internal.h"
73 
74 
75 #ifdef EVENT__HAVE_WORKING_KQUEUE
76 #include "kqueue-internal.h"
77 #endif
78 
79 #ifdef EVENT__HAVE_EVENT_PORTS
80 extern const struct eventop evportops;
81 #endif
82 #ifdef EVENT__HAVE_SELECT
83 extern const struct eventop selectops;
84 #endif
85 #ifdef EVENT__HAVE_POLL
86 extern const struct eventop pollops;
87 #endif
88 #ifdef EVENT__HAVE_EPOLL
89 extern const struct eventop epollops;
90 #endif
91 #ifdef EVENT__HAVE_WORKING_KQUEUE
92 extern const struct eventop kqops;
93 #endif
94 #ifdef EVENT__HAVE_DEVPOLL
95 extern const struct eventop devpollops;
96 #endif
97 #ifdef _WIN32
98 extern const struct eventop win32ops;
99 #endif
100 
101 /* Array of backends in order of preference. */
102 static const struct eventop *eventops[] = {
103 #ifdef EVENT__HAVE_EVENT_PORTS
104 	&evportops,
105 #endif
106 #ifdef EVENT__HAVE_WORKING_KQUEUE
107 	&kqops,
108 #endif
109 #ifdef EVENT__HAVE_EPOLL
110 	&epollops,
111 #endif
112 #ifdef EVENT__HAVE_DEVPOLL
113 	&devpollops,
114 #endif
115 #ifdef EVENT__HAVE_POLL
116 	&pollops,
117 #endif
118 #ifdef EVENT__HAVE_SELECT
119 	&selectops,
120 #endif
121 #ifdef _WIN32
122 	&win32ops,
123 #endif
124 	NULL
125 };
126 
127 /* Global state; deprecated */
128 struct event_base *event_global_current_base_ = NULL;
129 #define current_base event_global_current_base_
130 
131 /* Global state */
132 
133 static void *event_self_cbarg_ptr_ = NULL;
134 
135 /* Prototypes */
136 static void	event_queue_insert_active(struct event_base *, struct event_callback *);
137 static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
138 static void	event_queue_insert_timeout(struct event_base *, struct event *);
139 static void	event_queue_insert_inserted(struct event_base *, struct event *);
140 static void	event_queue_remove_active(struct event_base *, struct event_callback *);
141 static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
142 static void	event_queue_remove_timeout(struct event_base *, struct event *);
143 static void	event_queue_remove_inserted(struct event_base *, struct event *);
144 static void event_queue_make_later_events_active(struct event_base *base);
145 
146 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
147 static int event_del_(struct event *ev, int blocking);
148 
149 #ifdef USE_REINSERT_TIMEOUT
150 /* This code seems buggy; only turn it on if we find out what the trouble is. */
151 static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
152 #endif
153 
154 static int	event_haveevents(struct event_base *);
155 
156 static int	event_process_active(struct event_base *);
157 
158 static int	timeout_next(struct event_base *, struct timeval **);
159 static void	timeout_process(struct event_base *);
160 
161 static inline void	event_signal_closure(struct event_base *, struct event *ev);
162 static inline void	event_persist_closure(struct event_base *, struct event *ev);
163 
164 static int	evthread_notify_base(struct event_base *base);
165 
166 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
167     struct event *ev);
168 
169 #ifndef EVENT__DISABLE_DEBUG_MODE
170 /* These functions implement a hashtable of which 'struct event *' structures
171  * have been setup or added.  We don't want to trust the content of the struct
172  * event itself, since we're trying to work through cases where an event gets
173  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
174  */
175 
176 struct event_debug_entry {
177 	HT_ENTRY(event_debug_entry) node;
178 	const struct event *ptr;
179 	unsigned added : 1;
180 };
181 
182 static inline unsigned
183 hash_debug_entry(const struct event_debug_entry *e)
184 {
185 	/* We need to do this silliness to convince compilers that we
186 	 * honestly mean to cast e->ptr to an integer, and discard any
187 	 * part of it that doesn't fit in an unsigned.
188 	 */
189 	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
190 	/* Our hashtable implementation is pretty sensitive to low bits,
191 	 * and every struct event is over 64 bytes in size, so we can
192 	 * just say >>6. */
193 	return (u >> 6);
194 }
195 
196 static inline int
197 eq_debug_entry(const struct event_debug_entry *a,
198     const struct event_debug_entry *b)
199 {
200 	return a->ptr == b->ptr;
201 }
202 
203 int event_debug_mode_on_ = 0;
204 /* Set if it's too late to enable event_debug_mode. */
205 static int event_debug_mode_too_late = 0;
206 #ifndef EVENT__DISABLE_THREAD_SUPPORT
207 static void *event_debug_map_lock_ = NULL;
208 #endif
209 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
210 	HT_INITIALIZER();
211 
212 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
213     eq_debug_entry)
214 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
215     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
216 
217 /* Macro: record that ev is now setup (that is, ready for an add) */
218 #define event_debug_note_setup_(ev) do {				\
219 	if (event_debug_mode_on_) {					\
220 		struct event_debug_entry *dent,find;			\
221 		find.ptr = (ev);					\
222 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
223 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
224 		if (dent) {						\
225 			dent->added = 0;				\
226 		} else {						\
227 			dent = mm_malloc(sizeof(*dent));		\
228 			if (!dent)					\
229 				event_err(1,				\
230 				    "Out of memory in debugging code");	\
231 			dent->ptr = (ev);				\
232 			dent->added = 0;				\
233 			HT_INSERT(event_debug_map, &global_debug_map, dent); \
234 		}							\
235 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
236 	}								\
237 	event_debug_mode_too_late = 1;					\
238 	} while (0)
239 /* Macro: record that ev is no longer setup */
240 #define event_debug_note_teardown_(ev) do {				\
241 	if (event_debug_mode_on_) {					\
242 		struct event_debug_entry *dent,find;			\
243 		find.ptr = (ev);					\
244 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
245 		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
246 		if (dent)						\
247 			mm_free(dent);					\
248 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
249 	}								\
250 	event_debug_mode_too_late = 1;					\
251 	} while (0)
252 /* Macro: record that ev is now added */
253 #define event_debug_note_add_(ev)	do {				\
254 	if (event_debug_mode_on_) {					\
255 		struct event_debug_entry *dent,find;			\
256 		find.ptr = (ev);					\
257 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
258 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
259 		if (dent) {						\
260 			dent->added = 1;				\
261 		} else {						\
262 			event_errx(EVENT_ERR_ABORT_,			\
263 			    "%s: noting an add on a non-setup event %p" \
264 			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
265 			    ", flags: 0x%x)",				\
266 			    __func__, (ev), (ev)->ev_events,		\
267 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
268 		}							\
269 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
270 	}								\
271 	event_debug_mode_too_late = 1;					\
272 	} while (0)
273 /* Macro: record that ev is no longer added */
274 #define event_debug_note_del_(ev) do {					\
275 	if (event_debug_mode_on_) {					\
276 		struct event_debug_entry *dent,find;			\
277 		find.ptr = (ev);					\
278 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
279 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
280 		if (dent) {						\
281 			dent->added = 0;				\
282 		} else {						\
283 			event_errx(EVENT_ERR_ABORT_,			\
284 			    "%s: noting a del on a non-setup event %p"	\
285 			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
286 			    ", flags: 0x%x)",				\
287 			    __func__, (ev), (ev)->ev_events,		\
288 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
289 		}							\
290 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
291 	}								\
292 	event_debug_mode_too_late = 1;					\
293 	} while (0)
294 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
295 #define event_debug_assert_is_setup_(ev) do {				\
296 	if (event_debug_mode_on_) {					\
297 		struct event_debug_entry *dent,find;			\
298 		find.ptr = (ev);					\
299 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
300 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
301 		if (!dent) {						\
302 			event_errx(EVENT_ERR_ABORT_,			\
303 			    "%s called on a non-initialized event %p"	\
304 			    " (events: 0x%x, fd: "EV_SOCK_FMT\
305 			    ", flags: 0x%x)",				\
306 			    __func__, (ev), (ev)->ev_events,		\
307 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
308 		}							\
309 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
310 	}								\
311 	} while (0)
312 /* Macro: assert that ev is not added (i.e., okay to tear down or set
313  * up again) */
314 #define event_debug_assert_not_added_(ev) do {				\
315 	if (event_debug_mode_on_) {					\
316 		struct event_debug_entry *dent,find;			\
317 		find.ptr = (ev);					\
318 		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
319 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
320 		if (dent && dent->added) {				\
321 			event_errx(EVENT_ERR_ABORT_,			\
322 			    "%s called on an already added event %p"	\
323 			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
324 			    "flags: 0x%x)",				\
325 			    __func__, (ev), (ev)->ev_events,		\
326 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
327 		}							\
328 		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
329 	}								\
330 	} while (0)
331 #else
332 #define event_debug_note_setup_(ev) \
333 	((void)0)
334 #define event_debug_note_teardown_(ev) \
335 	((void)0)
336 #define event_debug_note_add_(ev) \
337 	((void)0)
338 #define event_debug_note_del_(ev) \
339 	((void)0)
340 #define event_debug_assert_is_setup_(ev) \
341 	((void)0)
342 #define event_debug_assert_not_added_(ev) \
343 	((void)0)
344 #endif
345 
346 #define EVENT_BASE_ASSERT_LOCKED(base)		\
347 	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
348 
349 /* How often (in seconds) do we check for changes in wall clock time relative
350  * to monotonic time?  Set this to -1 for 'never.' */
351 #define CLOCK_SYNC_INTERVAL 5
352 
353 /** Set 'tp' to the current time according to 'base'.  We must hold the lock
354  * on 'base'.  If there is a cached time, return it.  Otherwise, use
355  * clock_gettime or gettimeofday as appropriate to find out the right time.
356  * Return 0 on success, -1 on failure.
357  */
358 static int
359 gettime(struct event_base *base, struct timeval *tp)
360 {
361 	EVENT_BASE_ASSERT_LOCKED(base);
362 
363 	if (base->tv_cache.tv_sec) {
364 		*tp = base->tv_cache;
365 		return (0);
366 	}
367 
368 	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
369 		return -1;
370 	}
371 
372 	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
373 	    < tp->tv_sec) {
374 		struct timeval tv;
375 		evutil_gettimeofday(&tv,NULL);
376 		evutil_timersub(&tv, tp, &base->tv_clock_diff);
377 		base->last_updated_clock_diff = tp->tv_sec;
378 	}
379 
380 	return 0;
381 }
382 
383 int
384 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
385 {
386 	int r;
387 	if (!base) {
388 		base = current_base;
389 		if (!current_base)
390 			return evutil_gettimeofday(tv, NULL);
391 	}
392 
393 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
394 	if (base->tv_cache.tv_sec == 0) {
395 		r = evutil_gettimeofday(tv, NULL);
396 	} else {
397 		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
398 		r = 0;
399 	}
400 	EVBASE_RELEASE_LOCK(base, th_base_lock);
401 	return r;
402 }
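
/* A minimal usage sketch for the cached-clock API above (the helper name
 * below is hypothetical and the block is not compiled into this file). */
#if 0
static void
example_timestamp_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;

	/* While callbacks are running this is normally answered from
	 * base->tv_cache, so no clock_gettime()/gettimeofday() call is
	 * made for every event. */
	event_base_gettimeofday_cached(base, &now);
	(void)fd;
	(void)what;
	(void)now;
}
#endif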
403 
404 /** Make 'base' have no current cached time. */
405 static inline void
406 clear_time_cache(struct event_base *base)
407 {
408 	base->tv_cache.tv_sec = 0;
409 }
410 
411 /** Replace the cached time in 'base' with the current time. */
412 static inline void
413 update_time_cache(struct event_base *base)
414 {
415 	base->tv_cache.tv_sec = 0;
416 	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
417 	    gettime(base, &base->tv_cache);
418 }
419 
420 int
421 event_base_update_cache_time(struct event_base *base)
422 {
423 
424 	if (!base) {
425 		base = current_base;
426 		if (!current_base)
427 			return -1;
428 	}
429 
430 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
431 	if (base->running_loop)
432 		update_time_cache(base);
433 	EVBASE_RELEASE_LOCK(base, th_base_lock);
434 	return 0;
435 }
436 
437 static inline struct event *
438 event_callback_to_event(struct event_callback *evcb)
439 {
440 	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
441 	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
442 }
443 
444 static inline struct event_callback *
445 event_to_event_callback(struct event *ev)
446 {
447 	return &ev->ev_evcallback;
448 }
449 
450 struct event_base *
451 event_init(void)
452 {
453 	struct event_base *base = event_base_new_with_config(NULL);
454 
455 	if (base == NULL) {
456 		event_errx(1, "%s: Unable to construct event_base", __func__);
457 		return NULL;
458 	}
459 
460 	current_base = base;
461 
462 	return (base);
463 }
464 
465 struct event_base *
466 event_base_new(void)
467 {
468 	struct event_base *base = NULL;
469 	struct event_config *cfg = event_config_new();
470 	if (cfg) {
471 		base = event_base_new_with_config(cfg);
472 		event_config_free(cfg);
473 	}
474 	return base;
475 }
476 
477 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
478  * avoid. */
479 static int
480 event_config_is_avoided_method(const struct event_config *cfg,
481     const char *method)
482 {
483 	struct event_config_entry *entry;
484 
485 	TAILQ_FOREACH(entry, &cfg->entries, next) {
486 		if (entry->avoid_method != NULL &&
487 		    strcmp(entry->avoid_method, method) == 0)
488 			return (1);
489 	}
490 
491 	return (0);
492 }
493 
494 /** Return true iff 'method' is disabled according to the environment. */
495 static int
496 event_is_method_disabled(const char *name)
497 {
498 	char environment[64];
499 	int i;
500 
501 	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
502 	for (i = 8; environment[i] != '\0'; ++i)
503 		environment[i] = EVUTIL_TOUPPER_(environment[i]);
504 	/* Note that evutil_getenv_() ignores the environment entirely if
505 	 * we're setuid */
506 	return (evutil_getenv_(environment) != NULL);
507 }
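
/* Usage sketch for backend avoidance (hypothetical helper name; not
 * compiled here): a backend can be skipped programmatically through the
 * config, or via the environment variables checked above, e.g.
 * EVENT_NOSELECT=1 or EVENT_NOEPOLL=1. */
#if 0
static struct event_base *
example_base_without_select(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();

	if (cfg) {
		event_config_avoid_method(cfg, "select");
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	if (base)
		printf("using backend: %s\n", event_base_get_method(base));
	return base;
}
#endif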
508 
509 int
510 event_base_get_features(const struct event_base *base)
511 {
512 	return base->evsel->features;
513 }
514 
515 void
516 event_enable_debug_mode(void)
517 {
518 #ifndef EVENT__DISABLE_DEBUG_MODE
519 	if (event_debug_mode_on_)
520 		event_errx(1, "%s was called twice!", __func__);
521 	if (event_debug_mode_too_late)
522 		event_errx(1, "%s must be called *before* creating any events "
523 		    "or event_bases",__func__);
524 
525 	event_debug_mode_on_ = 1;
526 
527 	HT_INIT(event_debug_map, &global_debug_map);
528 #endif
529 }
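
/* Sketch of turning debug mode on (hypothetical helper; not compiled
 * here).  The call must come before any event or event_base is created,
 * or the event_debug_mode_too_late check above reports a fatal error. */
#if 0
static struct event_base *
example_debug_base(void)
{
	event_enable_debug_mode();	/* must precede all other setup */
	return event_base_new();
}
#endif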
530 
531 void
532 event_disable_debug_mode(void)
533 {
534 #ifndef EVENT__DISABLE_DEBUG_MODE
535 	struct event_debug_entry **ent, *victim;
536 
537 	EVLOCK_LOCK(event_debug_map_lock_, 0);
538 	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
539 		victim = *ent;
540 		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
541 		mm_free(victim);
542 	}
543 	HT_CLEAR(event_debug_map, &global_debug_map);
544 	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
545 
546 	event_debug_mode_on_  = 0;
547 #endif
548 }
549 
550 struct event_base *
551 event_base_new_with_config(const struct event_config *cfg)
552 {
553 	int i;
554 	struct event_base *base;
555 	int should_check_environment;
556 
557 #ifndef EVENT__DISABLE_DEBUG_MODE
558 	event_debug_mode_too_late = 1;
559 #endif
560 
561 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
562 		event_warn("%s: calloc", __func__);
563 		return NULL;
564 	}
565 
566 	if (cfg)
567 		base->flags = cfg->flags;
568 
569 	should_check_environment =
570 	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
571 
572 	{
573 		struct timeval tmp;
574 		int precise_time =
575 		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
576 		int flags;
577 		if (should_check_environment && !precise_time) {
578 			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
579 			if (precise_time) base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
580 		}
581 		flags = precise_time ? EV_MONOT_PRECISE : 0;
582 		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
583 
584 		gettime(base, &tmp);
585 	}
586 
587 	min_heap_ctor_(&base->timeheap);
588 
589 	base->sig.ev_signal_pair[0] = -1;
590 	base->sig.ev_signal_pair[1] = -1;
591 	base->th_notify_fd[0] = -1;
592 	base->th_notify_fd[1] = -1;
593 
594 	TAILQ_INIT(&base->active_later_queue);
595 
596 	evmap_io_initmap_(&base->io);
597 	evmap_signal_initmap_(&base->sigmap);
598 	event_changelist_init_(&base->changelist);
599 
600 	base->evbase = NULL;
601 
602 	if (cfg) {
603 		memcpy(&base->max_dispatch_time,
604 		    &cfg->max_dispatch_interval, sizeof(struct timeval));
605 		base->limit_callbacks_after_prio =
606 		    cfg->limit_callbacks_after_prio;
607 	} else {
608 		base->max_dispatch_time.tv_sec = -1;
609 		base->limit_callbacks_after_prio = 1;
610 	}
611 	if (cfg && cfg->max_dispatch_callbacks >= 0) {
612 		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
613 	} else {
614 		base->max_dispatch_callbacks = INT_MAX;
615 	}
616 	if (base->max_dispatch_callbacks == INT_MAX &&
617 	    base->max_dispatch_time.tv_sec == -1)
618 		base->limit_callbacks_after_prio = INT_MAX;
619 
620 	for (i = 0; eventops[i] && !base->evbase; i++) {
621 		if (cfg != NULL) {
622 			/* determine if this backend should be avoided */
623 			if (event_config_is_avoided_method(cfg,
624 				eventops[i]->name))
625 				continue;
626 			if ((eventops[i]->features & cfg->require_features)
627 			    != cfg->require_features)
628 				continue;
629 		}
630 
631 		/* also obey the environment variables */
632 		if (should_check_environment &&
633 		    event_is_method_disabled(eventops[i]->name))
634 			continue;
635 
636 		base->evsel = eventops[i];
637 
638 		base->evbase = base->evsel->init(base);
639 	}
640 
641 	if (base->evbase == NULL) {
642 		event_warnx("%s: no event mechanism available",
643 		    __func__);
644 		base->evsel = NULL;
645 		event_base_free(base);
646 		return NULL;
647 	}
648 
649 	if (evutil_getenv_("EVENT_SHOW_METHOD"))
650 		event_msgx("libevent using: %s", base->evsel->name);
651 
652 	/* allocate a single active event queue */
653 	if (event_base_priority_init(base, 1) < 0) {
654 		event_base_free(base);
655 		return NULL;
656 	}
657 
658 	/* prepare for threading */
659 
660 #ifndef EVENT__DISABLE_THREAD_SUPPORT
661 	if (EVTHREAD_LOCKING_ENABLED() &&
662 	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
663 		int r;
664 		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
665 		EVTHREAD_ALLOC_COND(base->current_event_cond);
666 		r = evthread_make_base_notifiable(base);
667 		if (r<0) {
668 			event_warnx("%s: Unable to make base notifiable.", __func__);
669 			event_base_free(base);
670 			return NULL;
671 		}
672 	}
673 #endif
674 
675 #ifdef _WIN32
676 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
677 		event_base_start_iocp_(base, cfg->n_cpus_hint);
678 #endif
679 
680 	return (base);
681 }
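
/* Configuration sketch for event_base_new_with_config() (hypothetical
 * helper; not compiled here): require an O(1) backend and the precise
 * monotonic timer, and fall back to a default base if no compiled-in
 * backend satisfies the requirements. */
#if 0
static struct event_base *
example_configured_base(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();

	if (cfg) {
		event_config_require_features(cfg, EV_FEATURE_O1);
		event_config_set_flag(cfg, EVENT_BASE_FLAG_PRECISE_TIMER);
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	if (!base)
		base = event_base_new();
	return base;
}
#endif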
682 
683 int
684 event_base_start_iocp_(struct event_base *base, int n_cpus)
685 {
686 #ifdef _WIN32
687 	if (base->iocp)
688 		return 0;
689 	base->iocp = event_iocp_port_launch_(n_cpus);
690 	if (!base->iocp) {
691 		event_warnx("%s: Couldn't launch IOCP", __func__);
692 		return -1;
693 	}
694 	return 0;
695 #else
696 	return -1;
697 #endif
698 }
699 
700 void
701 event_base_stop_iocp_(struct event_base *base)
702 {
703 #ifdef _WIN32
704 	int rv;
705 
706 	if (!base->iocp)
707 		return;
708 	rv = event_iocp_shutdown_(base->iocp, -1);
709 	EVUTIL_ASSERT(rv >= 0);
710 	base->iocp = NULL;
711 #endif
712 }
713 
714 static int
715 event_base_cancel_single_callback_(struct event_base *base,
716     struct event_callback *evcb,
717     int run_finalizers)
718 {
719 	int result = 0;
720 
721 	if (evcb->evcb_flags & EVLIST_INIT) {
722 		struct event *ev = event_callback_to_event(evcb);
723 		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
724 			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
725 			result = 1;
726 		}
727 	} else {
728 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
729 		event_callback_cancel_nolock_(base, evcb, 1);
730 		EVBASE_RELEASE_LOCK(base, th_base_lock);
731 		result = 1;
732 	}
733 
734 	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
735 		switch (evcb->evcb_closure) {
736 		case EV_CLOSURE_EVENT_FINALIZE:
737 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
738 			struct event *ev = event_callback_to_event(evcb);
739 			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
740 			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
741 				mm_free(ev);
742 			break;
743 		}
744 		case EV_CLOSURE_CB_FINALIZE:
745 			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
746 			break;
747 		default:
748 			break;
749 		}
750 	}
751 	return result;
752 }
753 
754 static void
755 event_base_free_(struct event_base *base, int run_finalizers)
756 {
757 	int i, n_deleted=0;
758 	struct event *ev;
759 	/* XXXX grab the lock? If there is contention when one thread frees
760 	 * the base, then the contending thread will be very sad soon. */
761 
762 	/* event_base_free(NULL) is how to free the current_base if we
763 	 * made it with event_init and forgot to hold a reference to it. */
764 	if (base == NULL && current_base)
765 		base = current_base;
766 	/* Don't actually free NULL. */
767 	if (base == NULL) {
768 		event_warnx("%s: no base to free", __func__);
769 		return;
770 	}
771 	/* XXX(niels) - check for internal events first */
772 
773 #ifdef _WIN32
774 	event_base_stop_iocp_(base);
775 #endif
776 
777 	/* threading fds if we have them */
778 	if (base->th_notify_fd[0] != -1) {
779 		event_del(&base->th_notify);
780 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
781 		if (base->th_notify_fd[1] != -1)
782 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
783 		base->th_notify_fd[0] = -1;
784 		base->th_notify_fd[1] = -1;
785 		event_debug_unassign(&base->th_notify);
786 	}
787 
788 	/* Delete all non-internal events. */
789 	evmap_delete_all_(base);
790 
791 	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
792 		event_del(ev);
793 		++n_deleted;
794 	}
795 	for (i = 0; i < base->n_common_timeouts; ++i) {
796 		struct common_timeout_list *ctl =
797 		    base->common_timeout_queues[i];
798 		event_del(&ctl->timeout_event); /* Internal; doesn't count */
799 		event_debug_unassign(&ctl->timeout_event);
800 		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
801 			struct event *next = TAILQ_NEXT(ev,
802 			    ev_timeout_pos.ev_next_with_common_timeout);
803 			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
804 				event_del(ev);
805 				++n_deleted;
806 			}
807 			ev = next;
808 		}
809 		mm_free(ctl);
810 	}
811 	if (base->common_timeout_queues)
812 		mm_free(base->common_timeout_queues);
813 
814 	for (i = 0; i < base->nactivequeues; ++i) {
815 		struct event_callback *evcb, *next;
816 		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
817 			next = TAILQ_NEXT(evcb, evcb_active_next);
818 			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
819 			evcb = next;
820 		}
821 	}
822 	{
823 		struct event_callback *evcb;
824 		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
825 			n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
826 		}
827 	}
828 
829 
830 	if (n_deleted)
831 		event_debug(("%s: %d events were still set in base",
832 			__func__, n_deleted));
833 
834 	while (LIST_FIRST(&base->once_events)) {
835 		struct event_once *eonce = LIST_FIRST(&base->once_events);
836 		LIST_REMOVE(eonce, next_once);
837 		mm_free(eonce);
838 	}
839 
840 	if (base->evsel != NULL && base->evsel->dealloc != NULL)
841 		base->evsel->dealloc(base);
842 
843 	for (i = 0; i < base->nactivequeues; ++i)
844 		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
845 
846 	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
847 	min_heap_dtor_(&base->timeheap);
848 
849 	mm_free(base->activequeues);
850 
851 	evmap_io_clear_(&base->io);
852 	evmap_signal_clear_(&base->sigmap);
853 	event_changelist_freemem_(&base->changelist);
854 
855 	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
856 	EVTHREAD_FREE_COND(base->current_event_cond);
857 
858 	/* If we're freeing current_base, there won't be a current_base. */
859 	if (base == current_base)
860 		current_base = NULL;
861 	mm_free(base);
862 }
863 
864 void
865 event_base_free_nofinalize(struct event_base *base)
866 {
867 	event_base_free_(base, 0);
868 }
869 
870 void
871 event_base_free(struct event_base *base)
872 {
873 	event_base_free_(base, 1);
874 }
875 
876 /* Fake eventop; used to disable the backend temporarily inside event_reinit
877  * so that we can call event_del() on an event without telling the backend.
878  */
879 static int
880 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
881     short events, void *fdinfo)
882 {
883 	return 0;
884 }
885 const struct eventop nil_eventop = {
886 	"nil",
887 	NULL, /* init: unused. */
888 	NULL, /* add: unused. */
889 	nil_backend_del, /* del: used, so needs to be killed. */
890 	NULL, /* dispatch: unused. */
891 	NULL, /* dealloc: unused. */
892 	0, 0, 0
893 };
894 
895 /* reinitialize the event base after a fork */
896 int
897 event_reinit(struct event_base *base)
898 {
899 	const struct eventop *evsel;
900 	int res = 0;
901 	int was_notifiable = 0;
902 	int had_signal_added = 0;
903 
904 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
905 
906 	evsel = base->evsel;
907 
908 	/* check if this event mechanism requires reinit on the backend */
909 	if (evsel->need_reinit) {
910 		/* We're going to call event_del() on our notify events (the
911 		 * ones that tell about signals and wakeup events).  But we
912 		 * don't actually want to tell the backend to change its
913 		 * state, since it might still share some resource (a kqueue,
914 		 * an epoll fd) with the parent process, and we don't want to
915 		 * delete the fds from _that_ backend, so we temporarily stub out
916 		 * the evsel with a replacement.
917 		 */
918 		base->evsel = &nil_eventop;
919 	}
920 
921 	/* We need to re-create the signal-notification fd and the
922 	 * thread-notification fd.  Otherwise, we'll still share those with
923 	 * the parent process, which would make any notification sent to them
924 	 * get received by one or both of the event loops, more or less at
925 	 * random.
926 	 */
927 	if (base->sig.ev_signal_added) {
928 		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
929 		event_debug_unassign(&base->sig.ev_signal);
930 		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
931 		if (base->sig.ev_signal_pair[0] != -1)
932 			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
933 		if (base->sig.ev_signal_pair[1] != -1)
934 			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
935 		had_signal_added = 1;
936 		base->sig.ev_signal_added = 0;
937 	}
938 	if (base->th_notify_fn != NULL) {
939 		was_notifiable = 1;
940 		base->th_notify_fn = NULL;
941 	}
942 	if (base->th_notify_fd[0] != -1) {
943 		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
944 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
945 		if (base->th_notify_fd[1] != -1)
946 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
947 		base->th_notify_fd[0] = -1;
948 		base->th_notify_fd[1] = -1;
949 		event_debug_unassign(&base->th_notify);
950 	}
951 
952 	/* Replace the original evsel. */
953 	base->evsel = evsel;
954 
955 	if (evsel->need_reinit) {
956 		/* Reconstruct the backend through brute-force, so that we do
957 		 * not share any structures with the parent process. For some
958 		 * backends, this is necessary: epoll and kqueue, for
959 		 * instance, have events associated with a kernel
960 		 * structure. If we didn't reinitialize, we'd share that
961 		 * structure with the parent process, and any changes made by
962 		 * the parent would affect our backend's behavior (and vice
963 		 * versa).
964 		 */
965 		if (base->evsel->dealloc != NULL)
966 			base->evsel->dealloc(base);
967 		base->evbase = evsel->init(base);
968 		if (base->evbase == NULL) {
969 			event_errx(1,
970 			   "%s: could not reinitialize event mechanism",
971 			   __func__);
972 			res = -1;
973 			goto done;
974 		}
975 
976 		/* Empty out the changelist (if any): we are starting from a
977 		 * blank slate. */
978 		event_changelist_freemem_(&base->changelist);
979 
980 		/* Tell the event maps to re-inform the backend about all
981 		 * pending events. This will make the signal notification
982 		 * event get re-created if necessary. */
983 		if (evmap_reinit_(base) < 0)
984 			res = -1;
985 	} else {
986 		if (had_signal_added)
987 			res = evsig_init_(base);
988 	}
989 
990 	/* If we were notifiable before, and nothing just exploded, become
991 	 * notifiable again. */
992 	if (was_notifiable && res == 0)
993 		res = evthread_make_base_notifiable_nolock_(base);
994 
995 done:
996 	EVBASE_RELEASE_LOCK(base, th_base_lock);
997 	return (res);
998 }
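
/* Fork-handling sketch (hypothetical helper; not compiled here): the
 * child calls event_reinit() so that it stops sharing backend state and
 * notification fds with the parent, as described above. */
#if 0
static int
example_run_in_child(struct event_base *base)
{
	pid_t pid = fork();	/* fork() needs <unistd.h> */

	if (pid == -1)
		return -1;
	if (pid == 0) {
		if (event_reinit(base) == -1)
			return -1;
		return event_base_dispatch(base);
	}
	return 0;	/* the parent keeps using its own base as before */
}
#endif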
999 
1000 /* Get the monotonic time for this event_base's timer */
1001 int
1002 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1003 {
1004 	int rv = -1;
1005 
1006 	if (base && tv) {
1007 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1008 		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1009 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1010 	}
1011 
1012 	return rv;
1013 }
1014 
1015 const char **
1016 event_get_supported_methods(void)
1017 {
1018 	static const char **methods = NULL;
1019 	const struct eventop **method;
1020 	const char **tmp;
1021 	int i = 0, k;
1022 
1023 	/* count all methods */
1024 	for (method = &eventops[0]; *method != NULL; ++method) {
1025 		++i;
1026 	}
1027 
1028 	/* allocate one more than we need for the NULL pointer */
1029 	tmp = mm_calloc((i + 1), sizeof(char *));
1030 	if (tmp == NULL)
1031 		return (NULL);
1032 
1033 	/* populate the array with the supported methods */
1034 	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1035 		tmp[i++] = eventops[k]->name;
1036 	}
1037 	tmp[i] = NULL;
1038 
1039 	if (methods != NULL)
1040 		mm_free((char**)methods);
1041 
1042 	methods = tmp;
1043 
1044 	return (methods);
1045 }
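
/* Enumeration sketch (hypothetical helper; not compiled here).  The
 * returned array is owned by the library: the static 'methods' pointer
 * above is freed and replaced on the next call, so callers must not
 * free it themselves. */
#if 0
static void
example_list_methods(void)
{
	const char **methods = event_get_supported_methods();
	int i;

	if (!methods)
		return;
	for (i = 0; methods[i] != NULL; ++i)
		printf("compiled-in method: %s\n", methods[i]);
}
#endif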
1046 
1047 struct event_config *
1048 event_config_new(void)
1049 {
1050 	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1051 
1052 	if (cfg == NULL)
1053 		return (NULL);
1054 
1055 	TAILQ_INIT(&cfg->entries);
1056 	cfg->max_dispatch_interval.tv_sec = -1;
1057 	cfg->max_dispatch_callbacks = INT_MAX;
1058 	cfg->limit_callbacks_after_prio = 1;
1059 
1060 	return (cfg);
1061 }
1062 
1063 static void
1064 event_config_entry_free(struct event_config_entry *entry)
1065 {
1066 	if (entry->avoid_method != NULL)
1067 		mm_free((char *)entry->avoid_method);
1068 	mm_free(entry);
1069 }
1070 
1071 void
1072 event_config_free(struct event_config *cfg)
1073 {
1074 	struct event_config_entry *entry;
1075 
1076 	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1077 		TAILQ_REMOVE(&cfg->entries, entry, next);
1078 		event_config_entry_free(entry);
1079 	}
1080 	mm_free(cfg);
1081 }
1082 
1083 int
1084 event_config_set_flag(struct event_config *cfg, int flag)
1085 {
1086 	if (!cfg)
1087 		return -1;
1088 	cfg->flags |= flag;
1089 	return 0;
1090 }
1091 
1092 int
1093 event_config_avoid_method(struct event_config *cfg, const char *method)
1094 {
1095 	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1096 	if (entry == NULL)
1097 		return (-1);
1098 
1099 	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1100 		mm_free(entry);
1101 		return (-1);
1102 	}
1103 
1104 	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1105 
1106 	return (0);
1107 }
1108 
1109 int
1110 event_config_require_features(struct event_config *cfg,
1111     int features)
1112 {
1113 	if (!cfg)
1114 		return (-1);
1115 	cfg->require_features = features;
1116 	return (0);
1117 }
1118 
1119 int
1120 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1121 {
1122 	if (!cfg)
1123 		return (-1);
1124 	cfg->n_cpus_hint = cpus;
1125 	return (0);
1126 }
1127 
1128 int
1129 event_config_set_max_dispatch_interval(struct event_config *cfg,
1130     const struct timeval *max_interval, int max_callbacks, int min_priority)
1131 {
1132 	if (max_interval)
1133 		memcpy(&cfg->max_dispatch_interval, max_interval,
1134 		    sizeof(struct timeval));
1135 	else
1136 		cfg->max_dispatch_interval.tv_sec = -1;
1137 	cfg->max_dispatch_callbacks =
1138 	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
1139 	if (min_priority < 0)
1140 		min_priority = 0;
1141 	cfg->limit_callbacks_after_prio = min_priority;
1142 	return (0);
1143 }
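
/* Tuning sketch (hypothetical helper; not compiled here): with these
 * settings, callbacks at numeric priority 1 and above run for at most
 * 10 msec or 16 callbacks per pass before the loop goes back to check
 * for priority-0 work; priority-0 callbacks are never limited. */
#if 0
static struct event_base *
example_latency_tuned_base(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	struct timeval ten_msec = { 0, 10 * 1000 };

	if (!cfg)
		return NULL;
	event_config_set_max_dispatch_interval(cfg, &ten_msec, 16, 1);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
	return base;
}
#endif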
1144 
1145 int
1146 event_priority_init(int npriorities)
1147 {
1148 	return event_base_priority_init(current_base, npriorities);
1149 }
1150 
1151 int
1152 event_base_priority_init(struct event_base *base, int npriorities)
1153 {
1154 	int i, r;
1155 	r = -1;
1156 
1157 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1158 
1159 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1160 	    || npriorities >= EVENT_MAX_PRIORITIES)
1161 		goto err;
1162 
1163 	if (npriorities == base->nactivequeues)
1164 		goto ok;
1165 
1166 	if (base->nactivequeues) {
1167 		mm_free(base->activequeues);
1168 		base->nactivequeues = 0;
1169 	}
1170 
1171 	/* Allocate our priority queues */
1172 	base->activequeues = (struct evcallback_list *)
1173 	  mm_calloc(npriorities, sizeof(struct evcallback_list));
1174 	if (base->activequeues == NULL) {
1175 		event_warn("%s: calloc", __func__);
1176 		goto err;
1177 	}
1178 	base->nactivequeues = npriorities;
1179 
1180 	for (i = 0; i < base->nactivequeues; ++i) {
1181 		TAILQ_INIT(&base->activequeues[i]);
1182 	}
1183 
1184 ok:
1185 	r = 0;
1186 err:
1187 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1188 	return (r);
1189 }
1190 
1191 int
1192 event_base_get_npriorities(struct event_base *base)
1193 {
1194 
1195 	int n;
1196 	if (base == NULL)
1197 		base = current_base;
1198 
1199 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1200 	n = base->nactivequeues;
1201 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1202 	return (n);
1203 }
1204 
1205 int
1206 event_base_get_num_events(struct event_base *base, unsigned int type)
1207 {
1208 	int r = 0;
1209 
1210 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1211 
1212 	if (type & EVENT_BASE_COUNT_ACTIVE)
1213 		r += base->event_count_active;
1214 
1215 	if (type & EVENT_BASE_COUNT_VIRTUAL)
1216 		r += base->virtual_event_count;
1217 
1218 	if (type & EVENT_BASE_COUNT_ADDED)
1219 		r += base->event_count;
1220 
1221 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1222 
1223 	return r;
1224 }
1225 
1226 int
1227 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1228 {
1229 	int r = 0;
1230 
1231 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1232 
1233 	if (type & EVENT_BASE_COUNT_ACTIVE) {
1234 		r += base->event_count_active_max;
1235 		if (clear)
1236 			base->event_count_active_max = 0;
1237 	}
1238 
1239 	if (type & EVENT_BASE_COUNT_VIRTUAL) {
1240 		r += base->virtual_event_count_max;
1241 		if (clear)
1242 			base->virtual_event_count_max = 0;
1243 	}
1244 
1245 	if (type & EVENT_BASE_COUNT_ADDED) {
1246 		r += base->event_count_max;
1247 		if (clear)
1248 			base->event_count_max = 0;
1249 	}
1250 
1251 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1252 
1253 	return r;
1254 }
1255 
1256 /* Returns true iff we're currently watching any events. */
1257 static int
1258 event_haveevents(struct event_base *base)
1259 {
1260 	/* Caller must hold th_base_lock */
1261 	return (base->virtual_event_count > 0 || base->event_count > 0);
1262 }
1263 
1264 /* "closure" function called when processing active signal events */
1265 static inline void
1266 event_signal_closure(struct event_base *base, struct event *ev)
1267 {
1268 	short ncalls;
1269 	int should_break;
1270 
1271 	/* Allows deletes to work */
1272 	ncalls = ev->ev_ncalls;
1273 	if (ncalls != 0)
1274 		ev->ev_pncalls = &ncalls;
1275 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1276 	while (ncalls) {
1277 		ncalls--;
1278 		ev->ev_ncalls = ncalls;
1279 		if (ncalls == 0)
1280 			ev->ev_pncalls = NULL;
1281 		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1282 
1283 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1284 		should_break = base->event_break;
1285 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1286 
1287 		if (should_break) {
1288 			if (ncalls != 0)
1289 				ev->ev_pncalls = NULL;
1290 			return;
1291 		}
1292 	}
1293 }
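
/* Signal-event sketch (hypothetical helper; not compiled here):
 * evsignal_new() creates a persistent EV_SIGNAL event, and ev_ncalls
 * (used by the closure above) carries how many times the signal was
 * delivered, so the callback runs once per delivery. */
#if 0
static struct event *
example_catch_sigint(struct event_base *base,
    void (*cb)(evutil_socket_t, short, void *), void *arg)
{
	struct event *ev = evsignal_new(base, SIGINT, cb, arg);

	if (ev != NULL && event_add(ev, NULL) == -1) {
		event_free(ev);
		ev = NULL;
	}
	return ev;
}
#endif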
1294 
1295 /* Common timeouts are special timeouts that are handled as queues rather than
1296  * in the minheap.  This is more efficient than the minheap if we happen to
1297  * know that we're going to get several thousand timeout events all with
1298  * the same timeout value.
1299  *
1300  * Since all our timeout handling code assumes timevals can be copied,
1301  * assigned, etc, we can't use a "magic pointer" to encode these common
1302  * timeouts.  Searching through a list to see if every timeout is common could
1303  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1304  * is 32 bits long, but only uses 20 of those bits (since it can never be over
1305  * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1306  * of index into the event_base's array of common timeouts.
1307  */
1308 
1309 #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1310 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1311 #define COMMON_TIMEOUT_IDX_SHIFT 20
1312 #define COMMON_TIMEOUT_MASK     0xf0000000
1313 #define COMMON_TIMEOUT_MAGIC    0x50000000
1314 
1315 #define COMMON_TIMEOUT_IDX(tv) \
1316 	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1317 
1318 /** Return true iff 'tv' is a common timeout in 'base' */
1319 static inline int
1320 is_common_timeout(const struct timeval *tv,
1321     const struct event_base *base)
1322 {
1323 	int idx;
1324 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1325 		return 0;
1326 	idx = COMMON_TIMEOUT_IDX(tv);
1327 	return idx < base->n_common_timeouts;
1328 }
1329 
1330 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1331  * one is a common timeout. */
1332 static inline int
1333 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1334 {
1335 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1336 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
1337 }
1338 
1339 /** Requires that 'tv' is a common timeout.  Return the corresponding
1340  * common_timeout_list. */
1341 static inline struct common_timeout_list *
1342 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1343 {
1344 	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1345 }
1346 
1347 #if 0
1348 static inline int
1349 common_timeout_ok(const struct timeval *tv,
1350     struct event_base *base)
1351 {
1352 	const struct timeval *expect =
1353 	    &get_common_timeout_list(base, tv)->duration;
1354 	return tv->tv_sec == expect->tv_sec &&
1355 	    tv->tv_usec == expect->tv_usec;
1356 }
1357 #endif
1358 
1359 /* Add the timeout for the first event in the given common timeout list to the
1360  * event_base's minheap. */
1361 static void
1362 common_timeout_schedule(struct common_timeout_list *ctl,
1363     const struct timeval *now, struct event *head)
1364 {
1365 	struct timeval timeout = head->ev_timeout;
1366 	timeout.tv_usec &= MICROSECONDS_MASK;
1367 	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1368 }
1369 
1370 /* Callback: invoked when the timeout for a common timeout queue triggers.
1371  * This means that (at least) the first event in that queue should be run,
1372  * and the timeout should be rescheduled if there are more events. */
1373 static void
1374 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1375 {
1376 	struct timeval now;
1377 	struct common_timeout_list *ctl = arg;
1378 	struct event_base *base = ctl->base;
1379 	struct event *ev = NULL;
1380 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1381 	gettime(base, &now);
1382 	while (1) {
1383 		ev = TAILQ_FIRST(&ctl->events);
1384 		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1385 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
1386 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1387 			break;
1388 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1389 		event_active_nolock_(ev, EV_TIMEOUT, 1);
1390 	}
1391 	if (ev)
1392 		common_timeout_schedule(ctl, &now, ev);
1393 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1394 }
1395 
1396 #define MAX_COMMON_TIMEOUTS 256
1397 
1398 const struct timeval *
1399 event_base_init_common_timeout(struct event_base *base,
1400     const struct timeval *duration)
1401 {
1402 	int i;
1403 	struct timeval tv;
1404 	const struct timeval *result=NULL;
1405 	struct common_timeout_list *new_ctl;
1406 
1407 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1408 	if (duration->tv_usec > 1000000) {
1409 		memcpy(&tv, duration, sizeof(struct timeval));
1410 		if (is_common_timeout(duration, base))
1411 			tv.tv_usec &= MICROSECONDS_MASK;
1412 		tv.tv_sec += tv.tv_usec / 1000000;
1413 		tv.tv_usec %= 1000000;
1414 		duration = &tv;
1415 	}
1416 	for (i = 0; i < base->n_common_timeouts; ++i) {
1417 		const struct common_timeout_list *ctl =
1418 		    base->common_timeout_queues[i];
1419 		if (duration->tv_sec == ctl->duration.tv_sec &&
1420 		    duration->tv_usec ==
1421 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1422 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1423 			result = &ctl->duration;
1424 			goto done;
1425 		}
1426 	}
1427 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1428 		event_warnx("%s: Too many common timeouts already in use; "
1429 		    "we only support %d per event_base", __func__,
1430 		    MAX_COMMON_TIMEOUTS);
1431 		goto done;
1432 	}
1433 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1434 		int n = base->n_common_timeouts < 16 ? 16 :
1435 		    base->n_common_timeouts*2;
1436 		struct common_timeout_list **newqueues =
1437 		    mm_realloc(base->common_timeout_queues,
1438 			n*sizeof(struct common_timeout_list *));
1439 		if (!newqueues) {
1440 			event_warn("%s: realloc",__func__);
1441 			goto done;
1442 		}
1443 		base->n_common_timeouts_allocated = n;
1444 		base->common_timeout_queues = newqueues;
1445 	}
1446 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1447 	if (!new_ctl) {
1448 		event_warn("%s: calloc",__func__);
1449 		goto done;
1450 	}
1451 	TAILQ_INIT(&new_ctl->events);
1452 	new_ctl->duration.tv_sec = duration->tv_sec;
1453 	new_ctl->duration.tv_usec =
1454 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1455 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1456 	evtimer_assign(&new_ctl->timeout_event, base,
1457 	    common_timeout_callback, new_ctl);
1458 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1459 	event_priority_set(&new_ctl->timeout_event, 0);
1460 	new_ctl->base = base;
1461 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1462 	result = &new_ctl->duration;
1463 
1464 done:
1465 	if (result)
1466 		EVUTIL_ASSERT(is_common_timeout(result, base));
1467 
1468 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1469 	return result;
1470 }
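
/* Common-timeout usage sketch (hypothetical helper; not compiled here):
 * the timeval returned above has the magic/index bits set in tv_usec,
 * so timers added with it are queued on one common_timeout_list instead
 * of each getting its own minheap entry.  Adding with a plain timeval
 * still works; it simply skips the optimization. */
#if 0
static struct event *
example_add_common_timer(struct event_base *base,
    void (*cb)(evutil_socket_t, short, void *), void *arg)
{
	struct timeval five_seconds = { 5, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &five_seconds);
	struct event *ev = evtimer_new(base, cb, arg);

	if (!ev)
		return NULL;
	if (event_add(ev, common ? common : &five_seconds) == -1) {
		event_free(ev);
		return NULL;
	}
	return ev;
}
#endif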
1471 
1472 /* Closure function invoked when we're activating a persistent event. */
1473 static inline void
1474 event_persist_closure(struct event_base *base, struct event *ev)
1475 {
1476 	void (*evcb_callback)(evutil_socket_t, short, void *);
1477 
1478 	/* Other fields of *ev that must be stored before executing */
1479 	evutil_socket_t evcb_fd;
1480 	short evcb_res;
1481 	void *evcb_arg;
1482 
1483 	/* reschedule the persistent event if we have a timeout. */
1484 	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1485 		/* If there was a timeout, we want it to run at an interval of
1486 		 * ev_io_timeout after the last time it was _scheduled_ for,
1487 		 * not ev_io_timeout after _now_.  If it fired for another
1488 		 * reason, though, the timeout ought to start ticking _now_. */
1489 		struct timeval run_at, relative_to, delay, now;
1490 		ev_uint32_t usec_mask = 0;
1491 		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1492 			&ev->ev_io_timeout));
1493 		gettime(base, &now);
1494 		if (is_common_timeout(&ev->ev_timeout, base)) {
1495 			delay = ev->ev_io_timeout;
1496 			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1497 			delay.tv_usec &= MICROSECONDS_MASK;
1498 			if (ev->ev_res & EV_TIMEOUT) {
1499 				relative_to = ev->ev_timeout;
1500 				relative_to.tv_usec &= MICROSECONDS_MASK;
1501 			} else {
1502 				relative_to = now;
1503 			}
1504 		} else {
1505 			delay = ev->ev_io_timeout;
1506 			if (ev->ev_res & EV_TIMEOUT) {
1507 				relative_to = ev->ev_timeout;
1508 			} else {
1509 				relative_to = now;
1510 			}
1511 		}
1512 		evutil_timeradd(&relative_to, &delay, &run_at);
1513 		if (evutil_timercmp(&run_at, &now, <)) {
1514 			/* Looks like we missed at least one invocation due to
1515 			 * a clock jump, not running the event loop for a
1516 			 * while, really slow callbacks, or
1517 			 * something. Reschedule relative to now.
1518 			 */
1519 			evutil_timeradd(&now, &delay, &run_at);
1520 		}
1521 		run_at.tv_usec |= usec_mask;
1522 		event_add_nolock_(ev, &run_at, 1);
1523 	}
1524 
1525 	/* Save our callback before we release the lock */
1526 	evcb_callback = ev->ev_callback;
1527 	evcb_fd = ev->ev_fd;
1528 	evcb_res = ev->ev_res;
1529 	evcb_arg = ev->ev_arg;
1530 
1531 	/* Release the lock */
1532 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1533 
1534 	/* Execute the callback */
1535 	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1536 }
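
/* Persistent-event sketch (hypothetical helper; not compiled here): an
 * EV_PERSIST event added with a timeout stays scheduled after every
 * callback, and the closure above re-arms the next deadline relative to
 * the previous one (or to now, if the event fired for another reason). */
#if 0
static struct event *
example_persistent_read(struct event_base *base, evutil_socket_t fd,
    void (*cb)(evutil_socket_t, short, void *), void *arg)
{
	struct timeval thirty_seconds = { 30, 0 };
	struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, cb, arg);

	if (ev != NULL && event_add(ev, &thirty_seconds) == -1) {
		event_free(ev);
		ev = NULL;
	}
	return ev;
}
#endif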
1537 
1538 /*
1539   Helper for event_process_active to process all the events in a single queue,
1540   releasing the lock as we go.  This function requires that the lock be held
1541   when it's invoked.  Returns -1 if we get a signal or an event_break that
1542   means we should stop processing any active events now.  Otherwise returns
1543   the number of non-internal event_callbacks that we processed.
1544 */
1545 static int
1546 event_process_active_single_queue(struct event_base *base,
1547     struct evcallback_list *activeq,
1548     int max_to_process, const struct timeval *endtime)
1549 {
1550 	struct event_callback *evcb;
1551 	int count = 0;
1552 
1553 	EVUTIL_ASSERT(activeq != NULL);
1554 
1555 	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1556 		struct event *ev=NULL;
1557 		if (evcb->evcb_flags & EVLIST_INIT) {
1558 			ev = event_callback_to_event(evcb);
1559 
1560 			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1561 				event_queue_remove_active(base, evcb);
1562 			else
1563 				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1564 			event_debug((
1565 			    "event_process_active: event: %p, %s%s%scall %p",
1566 			    ev,
1567 			    ev->ev_res & EV_READ ? "EV_READ " : " ",
1568 			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1569 			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1570 			    ev->ev_callback));
1571 		} else {
1572 			event_queue_remove_active(base, evcb);
1573 			event_debug(("event_process_active: event_callback %p, "
1574 				"closure %d, call %p",
1575 				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1576 		}
1577 
1578 		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1579 			++count;
1580 
1581 
1582 		base->current_event = evcb;
1583 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1584 		base->current_event_waiters = 0;
1585 #endif
1586 
1587 		switch (evcb->evcb_closure) {
1588 		case EV_CLOSURE_EVENT_SIGNAL:
1589 			EVUTIL_ASSERT(ev != NULL);
1590 			event_signal_closure(base, ev);
1591 			break;
1592 		case EV_CLOSURE_EVENT_PERSIST:
1593 			EVUTIL_ASSERT(ev != NULL);
1594 			event_persist_closure(base, ev);
1595 			break;
1596 		case EV_CLOSURE_EVENT: {
1597 			void (*evcb_callback)(evutil_socket_t, short, void *);
1598 			EVUTIL_ASSERT(ev != NULL);
1599 			evcb_callback = *ev->ev_callback;
1600 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1601 			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
1602 		}
1603 		break;
1604 		case EV_CLOSURE_CB_SELF: {
1605 			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1606 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1607 			evcb_selfcb(evcb, evcb->evcb_arg);
1608 		}
1609 		break;
1610 		case EV_CLOSURE_EVENT_FINALIZE:
1611 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1612 			void (*evcb_evfinalize)(struct event *, void *);
1613 			int evcb_closure = evcb->evcb_closure;
1614 			EVUTIL_ASSERT(ev != NULL);
1615 			base->current_event = NULL;
1616 			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1617 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1618 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1619 			evcb_evfinalize(ev, ev->ev_arg);
1620 			event_debug_note_teardown_(ev);
1621 			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1622 				mm_free(ev);
1623 		}
1624 		break;
1625 		case EV_CLOSURE_CB_FINALIZE: {
1626 			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1627 			base->current_event = NULL;
1628 			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1629 			EVBASE_RELEASE_LOCK(base, th_base_lock);
1630 			evcb_cbfinalize(evcb, evcb->evcb_arg);
1631 		}
1632 		break;
1633 		default:
1634 			EVUTIL_ASSERT(0);
1635 		}
1636 
1637 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1638 		base->current_event = NULL;
1639 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1640 		if (base->current_event_waiters) {
1641 			base->current_event_waiters = 0;
1642 			EVTHREAD_COND_BROADCAST(base->current_event_cond);
1643 		}
1644 #endif
1645 
1646 		if (base->event_break)
1647 			return -1;
1648 		if (count >= max_to_process)
1649 			return count;
1650 		if (count && endtime) {
1651 			struct timeval now;
1652 			update_time_cache(base);
1653 			gettime(base, &now);
1654 			if (evutil_timercmp(&now, endtime, >=))
1655 				return count;
1656 		}
1657 		if (base->event_continue)
1658 			break;
1659 	}
1660 	return count;
1661 }
1662 
1663 /*
1664  * Active events are stored in priority queues.  Lower priority numbers are
1665  * always processed before higher ones, so events at a numerically lower
1666  * priority can starve events at a higher-numbered priority.
1667  */
1668 
1669 static int
1670 event_process_active(struct event_base *base)
1671 {
1672 	/* Caller must hold th_base_lock */
1673 	struct evcallback_list *activeq = NULL;
1674 	int i, c = 0;
1675 	const struct timeval *endtime;
1676 	struct timeval tv;
1677 	const int maxcb = base->max_dispatch_callbacks;
1678 	const int limit_after_prio = base->limit_callbacks_after_prio;
1679 	if (base->max_dispatch_time.tv_sec >= 0) {
1680 		update_time_cache(base);
1681 		gettime(base, &tv);
1682 		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1683 		endtime = &tv;
1684 	} else {
1685 		endtime = NULL;
1686 	}
1687 
1688 	for (i = 0; i < base->nactivequeues; ++i) {
1689 		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1690 			base->event_running_priority = i;
1691 			activeq = &base->activequeues[i];
1692 			if (i < limit_after_prio)
1693 				c = event_process_active_single_queue(base, activeq,
1694 				    INT_MAX, NULL);
1695 			else
1696 				c = event_process_active_single_queue(base, activeq,
1697 				    maxcb, endtime);
1698 			if (c < 0) {
1699 				goto done;
1700 			} else if (c > 0)
1701 				break; /* Processed a real event; do not
1702 					* consider lower-priority events */
1703 			/* If we get here, all of the events we processed
1704 			 * were internal.  Continue. */
1705 		}
1706 	}
1707 
1708 done:
1709 	base->event_running_priority = -1;
1710 
1711 	return c;
1712 }
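
/* Priority sketch (hypothetical helper; not compiled here), following
 * the note above event_process_active(): queue 0 is drained before
 * queue 1, so sustained priority-0 activity can starve priority-1
 * events unless dispatch limits are configured. */
#if 0
static int
example_two_priorities(struct event_base *base, struct event *urgent_ev,
    struct event *bulk_ev)
{
	if (event_base_priority_init(base, 2) == -1)
		return -1;
	/* Assign priorities before the events are added or made active. */
	if (event_priority_set(urgent_ev, 0) == -1 ||
	    event_priority_set(bulk_ev, 1) == -1)
		return -1;
	return 0;
}
#endif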
1713 
1714 /*
1715  * Wait continuously for events.  We exit only if no events are left.
1716  */
1717 
1718 int
1719 event_dispatch(void)
1720 {
1721 	return (event_loop(0));
1722 }
1723 
1724 int
1725 event_base_dispatch(struct event_base *event_base)
1726 {
1727 	return (event_base_loop(event_base, 0));
1728 }
1729 
1730 const char *
1731 event_base_get_method(const struct event_base *base)
1732 {
1733 	EVUTIL_ASSERT(base);
1734 	return (base->evsel->name);
1735 }
1736 
1737 /** Callback: used to implement event_base_loopexit by telling the event_base
1738  * that it's time to exit its loop. */
1739 static void
1740 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1741 {
1742 	struct event_base *base = arg;
1743 	base->event_gotterm = 1;
1744 }
1745 
1746 int
1747 event_loopexit(const struct timeval *tv)
1748 {
1749 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1750 		    current_base, tv));
1751 }
1752 
1753 int
1754 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1755 {
1756 	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1757 		    event_base, tv));
1758 }
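
/* Loop-control sketch (hypothetical helper; not compiled here):
 * event_base_loopexit() schedules event_loopexit_cb above, so the loop
 * finishes the callbacks that are already active and then returns;
 * event_base_loopbreak() instead stops right after the callback that is
 * currently running. */
#if 0
static void
example_stop_after_ten_seconds(struct event_base *base)
{
	struct timeval ten_seconds = { 10, 0 };

	event_base_loopexit(base, &ten_seconds);
}
#endif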
1759 
1760 int
1761 event_loopbreak(void)
1762 {
1763 	return (event_base_loopbreak(current_base));
1764 }
1765 
1766 int
1767 event_base_loopbreak(struct event_base *event_base)
1768 {
1769 	int r = 0;
1770 	if (event_base == NULL)
1771 		return (-1);
1772 
1773 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1774 	event_base->event_break = 1;
1775 
1776 	if (EVBASE_NEED_NOTIFY(event_base)) {
1777 		r = evthread_notify_base(event_base);
1778 	} else {
1779 		r = (0);
1780 	}
1781 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1782 	return r;
1783 }
1784 
1785 int
1786 event_base_loopcontinue(struct event_base *event_base)
1787 {
1788 	int r = 0;
1789 	if (event_base == NULL)
1790 		return (-1);
1791 
1792 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1793 	event_base->event_continue = 1;
1794 
1795 	if (EVBASE_NEED_NOTIFY(event_base)) {
1796 		r = evthread_notify_base(event_base);
1797 	} else {
1798 		r = (0);
1799 	}
1800 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1801 	return r;
1802 }
1803 
1804 int
1805 event_base_got_break(struct event_base *event_base)
1806 {
1807 	int res;
1808 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1809 	res = event_base->event_break;
1810 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1811 	return res;
1812 }
1813 
1814 int
1815 event_base_got_exit(struct event_base *event_base)
1816 {
1817 	int res;
1818 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1819 	res = event_base->event_gotterm;
1820 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1821 	return res;
1822 }
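
/*
 * Illustrative sketch (assumed usage): a callback can consult these flags to
 * learn whether a loopbreak/loopexit request is already pending for its
 * base.  The callback name is a placeholder.
 *
 *	static void on_work(evutil_socket_t fd, short what, void *arg)
 *	{
 *		struct event_base *b = arg;
 *		if (event_base_got_break(b) || event_base_got_exit(b))
 *			return;         // the loop is on its way out
 *		// ... do the work ...
 *	}
 */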
1823 
1824 /* not thread safe */
1825 
1826 int
1827 event_loop(int flags)
1828 {
1829 	return event_base_loop(current_base, flags);
1830 }
1831 
1832 int
1833 event_base_loop(struct event_base *base, int flags)
1834 {
1835 	const struct eventop *evsel = base->evsel;
1836 	struct timeval tv;
1837 	struct timeval *tv_p;
1838 	int res, done, retval = 0;
1839 
1840 	/* Grab the lock.  We will release it inside evsel.dispatch, and again
1841 	 * as we invoke user callbacks. */
1842 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1843 
1844 	if (base->running_loop) {
1845 		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1846 		    " can run on each event_base at once.", __func__);
1847 		EVBASE_RELEASE_LOCK(base, th_base_lock);
1848 		return -1;
1849 	}
1850 
1851 	base->running_loop = 1;
1852 
1853 	clear_time_cache(base);
1854 
1855 	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1856 		evsig_set_base_(base);
1857 
1858 	done = 0;
1859 
1860 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1861 	base->th_owner_id = EVTHREAD_GET_ID();
1862 #endif
1863 
1864 	base->event_gotterm = base->event_break = 0;
1865 
1866 	while (!done) {
1867 		base->event_continue = 0;
1868 		base->n_deferreds_queued = 0;
1869 
1870 		/* Terminate the loop if we have been asked to */
1871 		if (base->event_gotterm) {
1872 			break;
1873 		}
1874 
1875 		if (base->event_break) {
1876 			break;
1877 		}
1878 
1879 		tv_p = &tv;
1880 		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1881 			timeout_next(base, &tv_p);
1882 		} else {
1883 			/*
1884 			 * if we have active events, we just poll for new events
1885 			 * without waiting.
1886 			 */
1887 			evutil_timerclear(&tv);
1888 		}
1889 
1890 		/* If we have no events, we just exit */
1891 		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1892 		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1893 			event_debug(("%s: no events registered.", __func__));
1894 			retval = 1;
1895 			goto done;
1896 		}
1897 
1898 		event_queue_make_later_events_active(base);
1899 
1900 		clear_time_cache(base);
1901 
1902 		res = evsel->dispatch(base, tv_p);
1903 
1904 		if (res == -1) {
1905 			event_debug(("%s: dispatch returned unsuccessfully.",
1906 				__func__));
1907 			retval = -1;
1908 			goto done;
1909 		}
1910 
1911 		update_time_cache(base);
1912 
1913 		timeout_process(base);
1914 
1915 		if (N_ACTIVE_CALLBACKS(base)) {
1916 			int n = event_process_active(base);
1917 			if ((flags & EVLOOP_ONCE)
1918 			    && N_ACTIVE_CALLBACKS(base) == 0
1919 			    && n != 0)
1920 				done = 1;
1921 		} else if (flags & EVLOOP_NONBLOCK)
1922 			done = 1;
1923 	}
1924 	event_debug(("%s: asked to terminate loop.", __func__));
1925 
1926 done:
1927 	clear_time_cache(base);
1928 	base->running_loop = 0;
1929 
1930 	EVBASE_RELEASE_LOCK(base, th_base_lock);
1931 
1932 	return (retval);
1933 }
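
/*
 * Illustrative sketch (assumed usage) of the flags handled by the loop
 * above:
 *
 *	event_base_loop(base, 0);                  // run until no events left
 *	event_base_loop(base, EVLOOP_ONCE);        // block once, run the active
 *	                                           // callbacks, then return
 *	event_base_loop(base, EVLOOP_NONBLOCK);    // poll; never block
 *	event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);
 *	                                           // keep looping even with no
 *	                                           // pending events
 */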
1934 
1935 /* One-time callback to implement event_base_once: invokes the user callback,
1936  * then deletes the allocated storage */
1937 static void
1938 event_once_cb(evutil_socket_t fd, short events, void *arg)
1939 {
1940 	struct event_once *eonce = arg;
1941 
1942 	(*eonce->cb)(fd, events, eonce->arg);
1943 	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
1944 	LIST_REMOVE(eonce, next_once);
1945 	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
1946 	event_debug_unassign(&eonce->ev);
1947 	mm_free(eonce);
1948 }
1949 
1950 /* Not threadsafe; schedules an event that fires only once. */
1951 int
1952 event_once(evutil_socket_t fd, short events,
1953     void (*callback)(evutil_socket_t, short, void *),
1954     void *arg, const struct timeval *tv)
1955 {
1956 	return event_base_once(current_base, fd, events, callback, arg, tv);
1957 }
1958 
1959 /* Schedules an event once */
1960 int
1961 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
1962     void (*callback)(evutil_socket_t, short, void *),
1963     void *arg, const struct timeval *tv)
1964 {
1965 	struct event_once *eonce;
1966 	int res = 0;
1967 	int activate = 0;
1968 
1969 	/* We cannot support signals that just fire once, or persistent
1970 	 * events. */
1971 	if (events & (EV_SIGNAL|EV_PERSIST))
1972 		return (-1);
1973 
1974 	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
1975 		return (-1);
1976 
1977 	eonce->cb = callback;
1978 	eonce->arg = arg;
1979 
1980 	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
1981 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
1982 
1983 		if (tv == NULL || ! evutil_timerisset(tv)) {
1984 			/* If the event is going to become active immediately,
1985 			 * don't put it on the timeout queue.  This is one
1986 			 * idiom for scheduling a callback, so let's make
1987 			 * it fast (and order-preserving). */
1988 			activate = 1;
1989 		}
1990 	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
1991 		events &= EV_READ|EV_WRITE|EV_CLOSED;
1992 
1993 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
1994 	} else {
1995 		/* Bad event combination */
1996 		mm_free(eonce);
1997 		return (-1);
1998 	}
1999 
2000 	if (res == 0) {
2001 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2002 		if (activate)
2003 			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2004 		else
2005 			res = event_add_nolock_(&eonce->ev, tv, 0);
2006 
2007 		if (res != 0) {
2008 			mm_free(eonce);
2009 			return (res);
2010 		} else {
2011 			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2012 		}
2013 		EVBASE_RELEASE_LOCK(base, th_base_lock);
2014 	}
2015 
2016 	return (0);
2017 }
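
/*
 * Illustrative sketch (assumed usage): a one-shot timer via
 * event_base_once(); the storage allocated above is freed by event_once_cb()
 * after the callback runs.  Names and the interval are placeholders.
 *
 *	static void fire_once(evutil_socket_t fd, short what, void *arg)
 *	{
 *		// runs exactly once
 *	}
 *	...
 *	struct timeval two_sec = { 2, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, fire_once, NULL, &two_sec);
 */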
2018 
2019 int
2020 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2021 {
2022 	if (!base)
2023 		base = current_base;
2024 	if (arg == &event_self_cbarg_ptr_)
2025 		arg = ev;
2026 
2027 	event_debug_assert_not_added_(ev);
2028 
2029 	ev->ev_base = base;
2030 
2031 	ev->ev_callback = callback;
2032 	ev->ev_arg = arg;
2033 	ev->ev_fd = fd;
2034 	ev->ev_events = events;
2035 	ev->ev_res = 0;
2036 	ev->ev_flags = EVLIST_INIT;
2037 	ev->ev_ncalls = 0;
2038 	ev->ev_pncalls = NULL;
2039 
2040 	if (events & EV_SIGNAL) {
2041 		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2042 			event_warnx("%s: EV_SIGNAL is not compatible with "
2043 			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2044 			return -1;
2045 		}
2046 		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2047 	} else {
2048 		if (events & EV_PERSIST) {
2049 			evutil_timerclear(&ev->ev_io_timeout);
2050 			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2051 		} else {
2052 			ev->ev_closure = EV_CLOSURE_EVENT;
2053 		}
2054 	}
2055 
2056 	min_heap_elem_init_(ev);
2057 
2058 	if (base != NULL) {
2059 		/* by default, we put new events into the middle priority */
2060 		ev->ev_pri = base->nactivequeues / 2;
2061 	}
2062 
2063 	event_debug_note_setup_(ev);
2064 
2065 	return 0;
2066 }
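
/*
 * Illustrative sketch (assumed usage): initializing a caller-allocated event
 * with event_assign(), using event_self_cbarg() (defined later in this file)
 * to pass the event to its own callback.  Names are placeholders.
 *
 *	static struct event watch;
 *	event_assign(&watch, base, fd, EV_READ|EV_PERSIST,
 *	    on_readable, event_self_cbarg());
 *	event_add(&watch, NULL);
 */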
2067 
2068 int
2069 event_base_set(struct event_base *base, struct event *ev)
2070 {
2071 	/* Only events that have never been added may be assigned to a different base */
2072 	if (ev->ev_flags != EVLIST_INIT)
2073 		return (-1);
2074 
2075 	event_debug_assert_is_setup_(ev);
2076 
2077 	ev->ev_base = base;
2078 	ev->ev_pri = base->nactivequeues/2;
2079 
2080 	return (0);
2081 }
2082 
2083 void
2084 event_set(struct event *ev, evutil_socket_t fd, short events,
2085 	  void (*callback)(evutil_socket_t, short, void *), void *arg)
2086 {
2087 	int r;
2088 	r = event_assign(ev, current_base, fd, events, callback, arg);
2089 	EVUTIL_ASSERT(r == 0);
2090 }
2091 
2092 void *
2093 event_self_cbarg(void)
2094 {
2095 	return &event_self_cbarg_ptr_;
2096 }
2097 
2098 struct event *
2099 event_base_get_running_event(struct event_base *base)
2100 {
2101 	struct event *ev = NULL;
2102 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2103 	if (EVBASE_IN_THREAD(base)) {
2104 		struct event_callback *evcb = base->current_event;
2105 		if (evcb->evcb_flags & EVLIST_INIT)
2106 			ev = event_callback_to_event(evcb);
2107 	}
2108 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2109 	return ev;
2110 }
2111 
2112 struct event *
2113 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2114 {
2115 	struct event *ev;
2116 	ev = mm_malloc(sizeof(struct event));
2117 	if (ev == NULL)
2118 		return (NULL);
2119 	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2120 		mm_free(ev);
2121 		return (NULL);
2122 	}
2123 
2124 	return (ev);
2125 }
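
/*
 * Illustrative sketch (assumed usage): the heap-allocating counterpart of
 * event_assign().  The callback name is a placeholder.
 *
 *	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
 *	    on_readable, NULL);
 *	if (ev == NULL)
 *		return;                 // allocation or assignment failed
 *	event_add(ev, NULL);
 *	// ... dispatch the base ...
 *	event_free(ev);                 // event_del()s it first if still added
 */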
2126 
2127 void
2128 event_free(struct event *ev)
2129 {
2130 	/* This assertion is disabled so that events which have been finalized
2131 	 * remain a valid target for event_free(); hence the call below is commented out. */
2132 	// event_debug_assert_is_setup_(ev);
2133 
2134 	/* make sure that this event won't be coming back to haunt us. */
2135 	event_del(ev);
2136 	event_debug_note_teardown_(ev);
2137 	mm_free(ev);
2138 
2139 }
2140 
2141 void
2142 event_debug_unassign(struct event *ev)
2143 {
2144 	event_debug_assert_not_added_(ev);
2145 	event_debug_note_teardown_(ev);
2146 
2147 	ev->ev_flags &= ~EVLIST_INIT;
2148 }
2149 
2150 #define EVENT_FINALIZE_FREE_ 0x10000
2151 static int
2152 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2153 {
2154 	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2155 	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2156 
2157 	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2158 	ev->ev_closure = closure;
2159 	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2160 	event_active_nolock_(ev, EV_FINALIZE, 1);
2161 	ev->ev_flags |= EVLIST_FINALIZING;
2162 	return 0;
2163 }
2164 
2165 static int
2166 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2167 {
2168 	int r;
2169 	struct event_base *base = ev->ev_base;
2170 	if (EVUTIL_FAILURE_CHECK(!base)) {
2171 		event_warnx("%s: event has no event_base set.", __func__);
2172 		return -1;
2173 	}
2174 
2175 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2176 	r = event_finalize_nolock_(base, flags, ev, cb);
2177 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2178 	return r;
2179 }
2180 
2181 int
2182 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2183 {
2184 	return event_finalize_impl_(flags, ev, cb);
2185 }
2186 
2187 int
2188 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2189 {
2190 	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2191 }
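
/*
 * Illustrative sketch (assumed usage): when an event's callback might still
 * be executing in the loop thread, event_free_finalize() defers the final
 * teardown until it is safe; the finalizer below is a placeholder.
 *
 *	static void on_final(struct event *ev, void *arg)
 *	{
 *		free(arg);              // safe now: the callback cannot run again
 *	}
 *	...
 *	event_free_finalize(0, ev, on_final);   // frees 'ev' after the finalizer
 */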
2192 
2193 void
2194 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2195 {
2196 	struct event *ev = NULL;
2197 	if (evcb->evcb_flags & EVLIST_INIT) {
2198 		ev = event_callback_to_event(evcb);
2199 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2200 	} else {
2201 		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2202 	}
2203 
2204 	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2205 	evcb->evcb_cb_union.evcb_cbfinalize = cb;
2206 	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2207 	evcb->evcb_flags |= EVLIST_FINALIZING;
2208 }
2209 
2210 void
2211 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2212 {
2213 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2214 	event_callback_finalize_nolock_(base, flags, evcb, cb);
2215 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2216 }
2217 
2218 /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2219  * callback will be invoked on *one of them*, after they have *all* been
2220  * finalized. */
2221 int
2222 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2223 {
2224 	int n_pending = 0, i;
2225 
2226 	if (base == NULL)
2227 		base = current_base;
2228 
2229 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2230 
2231 	event_debug(("%s: %d events finalizing", __func__, n_cbs));
2232 
2233 	/* At most one can be currently executing; the rest we just
2234 	 * cancel... But we always make sure that the finalize callback
2235 	 * runs. */
2236 	for (i = 0; i < n_cbs; ++i) {
2237 		struct event_callback *evcb = evcbs[i];
2238 		if (evcb == base->current_event) {
2239 			event_callback_finalize_nolock_(base, 0, evcb, cb);
2240 			++n_pending;
2241 		} else {
2242 			event_callback_cancel_nolock_(base, evcb, 0);
2243 		}
2244 	}
2245 
2246 	if (n_pending == 0) {
2247 		/* Just do the first one. */
2248 		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2249 	}
2250 
2251 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2252 	return 0;
2253 }
2254 
2255 /*
2256  * Sets the priority of an event - if the event is already active,
2257  * changing the priority will fail.
2258  */
2259 
2260 int
2261 event_priority_set(struct event *ev, int pri)
2262 {
2263 	event_debug_assert_is_setup_(ev);
2264 
2265 	if (ev->ev_flags & EVLIST_ACTIVE)
2266 		return (-1);
2267 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2268 		return (-1);
2269 
2270 	ev->ev_pri = pri;
2271 
2272 	return (0);
2273 }
2274 
2275 /*
2276  * Checks if a specific event is pending or scheduled.
2277  */
2278 
2279 int
2280 event_pending(const struct event *ev, short event, struct timeval *tv)
2281 {
2282 	int flags = 0;
2283 
2284 	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2285 		event_warnx("%s: event has no event_base set.", __func__);
2286 		return 0;
2287 	}
2288 
2289 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2290 	event_debug_assert_is_setup_(ev);
2291 
2292 	if (ev->ev_flags & EVLIST_INSERTED)
2293 		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2294 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2295 		flags |= ev->ev_res;
2296 	if (ev->ev_flags & EVLIST_TIMEOUT)
2297 		flags |= EV_TIMEOUT;
2298 
2299 	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2300 
2301 	/* See if there is a timeout that we should report */
2302 	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2303 		struct timeval tmp = ev->ev_timeout;
2304 		tmp.tv_usec &= MICROSECONDS_MASK;
2305 		/* correctly remap to real time */
2306 		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2307 	}
2308 
2309 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2310 
2311 	return (flags & event);
2312 }
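
/*
 * Illustrative sketch (assumed usage): checking whether an event is pending
 * and, if it has a timeout, retrieving the absolute time at which it fires.
 *
 *	struct timeval when;
 *	if (event_pending(ev, EV_READ|EV_TIMEOUT, &when) & EV_TIMEOUT) {
 *		// 'when' holds the absolute expiry time of the timeout
 *	}
 */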
2313 
2314 int
2315 event_initialized(const struct event *ev)
2316 {
2317 	if (!(ev->ev_flags & EVLIST_INIT))
2318 		return 0;
2319 
2320 	return 1;
2321 }
2322 
2323 void
2324 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2325 {
2326 	event_debug_assert_is_setup_(event);
2327 
2328 	if (base_out)
2329 		*base_out = event->ev_base;
2330 	if (fd_out)
2331 		*fd_out = event->ev_fd;
2332 	if (events_out)
2333 		*events_out = event->ev_events;
2334 	if (callback_out)
2335 		*callback_out = event->ev_callback;
2336 	if (arg_out)
2337 		*arg_out = event->ev_arg;
2338 }
2339 
2340 size_t
2341 event_get_struct_event_size(void)
2342 {
2343 	return sizeof(struct event);
2344 }
2345 
2346 evutil_socket_t
2347 event_get_fd(const struct event *ev)
2348 {
2349 	event_debug_assert_is_setup_(ev);
2350 	return ev->ev_fd;
2351 }
2352 
2353 struct event_base *
2354 event_get_base(const struct event *ev)
2355 {
2356 	event_debug_assert_is_setup_(ev);
2357 	return ev->ev_base;
2358 }
2359 
2360 short
2361 event_get_events(const struct event *ev)
2362 {
2363 	event_debug_assert_is_setup_(ev);
2364 	return ev->ev_events;
2365 }
2366 
2367 event_callback_fn
2368 event_get_callback(const struct event *ev)
2369 {
2370 	event_debug_assert_is_setup_(ev);
2371 	return ev->ev_callback;
2372 }
2373 
2374 void *
2375 event_get_callback_arg(const struct event *ev)
2376 {
2377 	event_debug_assert_is_setup_(ev);
2378 	return ev->ev_arg;
2379 }
2380 
2381 int
2382 event_get_priority(const struct event *ev)
2383 {
2384 	event_debug_assert_is_setup_(ev);
2385 	return ev->ev_pri;
2386 }
2387 
2388 int
2389 event_add(struct event *ev, const struct timeval *tv)
2390 {
2391 	int res;
2392 
2393 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2394 		event_warnx("%s: event has no event_base set.", __func__);
2395 		return -1;
2396 	}
2397 
2398 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2399 
2400 	res = event_add_nolock_(ev, tv, 0);
2401 
2402 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2403 
2404 	return (res);
2405 }
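
/*
 * Illustrative sketch (assumed usage): a periodic timer.  With EV_PERSIST
 * the interval is remembered and the event is re-added automatically after
 * each callback.  The callback name is a placeholder.
 *
 *	struct timeval interval = { 1, 0 };
 *	struct event *tick = event_new(base, -1, EV_PERSIST, on_tick, NULL);
 *	event_add(tick, &interval);     // fires roughly once per second
 */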
2406 
2407 /* Helper callback: wake an event_base from another thread.  This version
2408  * works by writing a byte to one end of a socketpair, so that the event_base
2409  * listening on the other end will wake up as the corresponding event
2410  * triggers */
2411 static int
2412 evthread_notify_base_default(struct event_base *base)
2413 {
2414 	char buf[1];
2415 	int r;
2416 	buf[0] = (char) 0;
2417 #ifdef _WIN32
2418 	r = send(base->th_notify_fd[1], buf, 1, 0);
2419 #else
2420 	r = write(base->th_notify_fd[1], buf, 1);
2421 #endif
2422 	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2423 }
2424 
2425 #ifdef EVENT__HAVE_EVENTFD
2426 /* Helper callback: wake an event_base from another thread.  This version
2427  * assumes that you have a working eventfd() implementation. */
2428 static int
2429 evthread_notify_base_eventfd(struct event_base *base)
2430 {
2431 	ev_uint64_t msg = 1;
2432 	int r;
2433 	do {
2434 		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2435 	} while (r < 0 && errno == EAGAIN);
2436 
2437 	return (r < 0) ? -1 : 0;
2438 }
2439 #endif
2440 
2441 
2442 /** Tell the thread currently running the event_loop for base (if any) that it
2443  * needs to stop waiting in its dispatch function (if it is) and process all
2444  * active callbacks. */
2445 static int
2446 evthread_notify_base(struct event_base *base)
2447 {
2448 	EVENT_BASE_ASSERT_LOCKED(base);
2449 	if (!base->th_notify_fn)
2450 		return -1;
2451 	if (base->is_notify_pending)
2452 		return 0;
2453 	base->is_notify_pending = 1;
2454 	return base->th_notify_fn(base);
2455 }
2456 
2457 /* Implementation function to remove a timeout on a currently pending event.
2458  */
2459 int
2460 event_remove_timer_nolock_(struct event *ev)
2461 {
2462 	struct event_base *base = ev->ev_base;
2463 
2464 	EVENT_BASE_ASSERT_LOCKED(base);
2465 	event_debug_assert_is_setup_(ev);
2466 
2467 	event_debug(("event_remove_timer_nolock: event: %p", ev));
2468 
2469 	/* If it's not pending on a timeout, we don't need to do anything. */
2470 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2471 		event_queue_remove_timeout(base, ev);
2472 		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2473 	}
2474 
2475 	return (0);
2476 }
2477 
2478 int
2479 event_remove_timer(struct event *ev)
2480 {
2481 	int res;
2482 
2483 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2484 		event_warnx("%s: event has no event_base set.", __func__);
2485 		return -1;
2486 	}
2487 
2488 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2489 
2490 	res = event_remove_timer_nolock_(ev);
2491 
2492 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2493 
2494 	return (res);
2495 }
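
/*
 * Illustrative sketch (assumed usage): dropping only the timeout from a
 * pending event while keeping its I/O registration.  'conn_ev' is a
 * placeholder for a read event added elsewhere.
 *
 *	struct timeval limit = { 30, 0 };
 *	event_add(conn_ev, &limit);     // EV_READ with a 30-second deadline
 *	...
 *	event_remove_timer(conn_ev);    // still waiting for EV_READ, but the
 *	                                // deadline is gone
 */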
2496 
2497 /* Implementation function to add an event.  Works just like event_add,
2498  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2499  * we treat tv as an absolute time, not as an interval to add to the current
2500  * time */
2501 int
2502 event_add_nolock_(struct event *ev, const struct timeval *tv,
2503     int tv_is_absolute)
2504 {
2505 	struct event_base *base = ev->ev_base;
2506 	int res = 0;
2507 	int notify = 0;
2508 
2509 	EVENT_BASE_ASSERT_LOCKED(base);
2510 	event_debug_assert_is_setup_(ev);
2511 
2512 	event_debug((
2513 		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2514 		 ev,
2515 		 EV_SOCK_ARG(ev->ev_fd),
2516 		 ev->ev_events & EV_READ ? "EV_READ " : " ",
2517 		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2518 		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2519 		 tv ? "EV_TIMEOUT " : " ",
2520 		 ev->ev_callback));
2521 
2522 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2523 
2524 	if (ev->ev_flags & EVLIST_FINALIZING) {
2525 		/* XXXX debug */
2526 		return (-1);
2527 	}
2528 
2529 	/*
2530 	 * prepare for timeout insertion further below; if we get a
2531 	 * failure at any step, we should not change any state.
2532 	 */
2533 	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2534 		if (min_heap_reserve_(&base->timeheap,
2535 			1 + min_heap_size_(&base->timeheap)) == -1)
2536 			return (-1);  /* ENOMEM == errno */
2537 	}
2538 
2539 	/* If the main thread is currently executing a signal event's
2540 	 * callback, and we are not the main thread, then we want to wait
2541 	 * until the callback is done before we mess with the event, or else
2542 	 * we can race on ev_ncalls and ev_pncalls below. */
2543 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2544 	if (base->current_event == event_to_event_callback(ev) &&
2545 	    (ev->ev_events & EV_SIGNAL)
2546 	    && !EVBASE_IN_THREAD(base)) {
2547 		++base->current_event_waiters;
2548 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2549 	}
2550 #endif
2551 
2552 	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2553 	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2554 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2555 			res = evmap_io_add_(base, ev->ev_fd, ev);
2556 		else if (ev->ev_events & EV_SIGNAL)
2557 			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2558 		if (res != -1)
2559 			event_queue_insert_inserted(base, ev);
2560 		if (res == 1) {
2561 			/* evmap says we need to notify the main thread. */
2562 			notify = 1;
2563 			res = 0;
2564 		}
2565 	}
2566 
2567 	/*
2568 	 * we should change the timeout state only if the previous event
2569 	 * addition succeeded.
2570 	 */
2571 	if (res != -1 && tv != NULL) {
2572 		struct timeval now;
2573 		int common_timeout;
2574 #ifdef USE_REINSERT_TIMEOUT
2575 		int was_common;
2576 		int old_timeout_idx;
2577 #endif
2578 
2579 		/*
2580 		 * for persistent timeout events, we remember the
2581 		 * timeout value and re-add the event.
2582 		 *
2583 		 * If tv_is_absolute, this was already set.
2584 		 */
2585 		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2586 			ev->ev_io_timeout = *tv;
2587 
2588 #ifndef USE_REINSERT_TIMEOUT
2589 		if (ev->ev_flags & EVLIST_TIMEOUT) {
2590 			event_queue_remove_timeout(base, ev);
2591 		}
2592 #endif
2593 
2594 		/* Check if it is active due to a timeout.  Rescheduling
2595 		 * this timeout before the callback can be executed
2596 		 * removes it from the active list. */
2597 		if ((ev->ev_flags & EVLIST_ACTIVE) &&
2598 		    (ev->ev_res & EV_TIMEOUT)) {
2599 			if (ev->ev_events & EV_SIGNAL) {
2600 				/* See if we are just actively executing
2601 				 * this event in a loop
2602 				 */
2603 				if (ev->ev_ncalls && ev->ev_pncalls) {
2604 					/* Abort loop */
2605 					*ev->ev_pncalls = 0;
2606 				}
2607 			}
2608 
2609 			event_queue_remove_active(base, event_to_event_callback(ev));
2610 		}
2611 
2612 		gettime(base, &now);
2613 
2614 		common_timeout = is_common_timeout(tv, base);
2615 #ifdef USE_REINSERT_TIMEOUT
2616 		was_common = is_common_timeout(&ev->ev_timeout, base);
2617 		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2618 #endif
2619 
2620 		if (tv_is_absolute) {
2621 			ev->ev_timeout = *tv;
2622 		} else if (common_timeout) {
2623 			struct timeval tmp = *tv;
2624 			tmp.tv_usec &= MICROSECONDS_MASK;
2625 			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2626 			ev->ev_timeout.tv_usec |=
2627 			    (tv->tv_usec & ~MICROSECONDS_MASK);
2628 		} else {
2629 			evutil_timeradd(&now, tv, &ev->ev_timeout);
2630 		}
2631 
2632 		event_debug((
2633 			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2634 			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2635 
2636 #ifdef USE_REINSERT_TIMEOUT
2637 		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2638 #else
2639 		event_queue_insert_timeout(base, ev);
2640 #endif
2641 
2642 		if (common_timeout) {
2643 			struct common_timeout_list *ctl =
2644 			    get_common_timeout_list(base, &ev->ev_timeout);
2645 			if (ev == TAILQ_FIRST(&ctl->events)) {
2646 				common_timeout_schedule(ctl, &now, ev);
2647 			}
2648 		} else {
2649 			struct event* top = NULL;
2650 			/* See if the earliest timeout is now earlier than it
2651 			 * was before: if so, we will need to tell the main
2652 			 * thread to wake up earlier than it would otherwise.
2653 			 * We double check the timeout of the top element to
2654 			 * handle time distortions due to system suspension.
2655 			 */
2656 			if (min_heap_elt_is_top_(ev))
2657 				notify = 1;
2658 			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2659 					 evutil_timercmp(&top->ev_timeout, &now, <))
2660 				notify = 1;
2661 		}
2662 	}
2663 
2664 	/* if we are not in the right thread, we need to wake up the loop */
2665 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2666 		evthread_notify_base(base);
2667 
2668 	event_debug_note_add_(ev);
2669 
2670 	return (res);
2671 }
2672 
2673 static int
2674 event_del_(struct event *ev, int blocking)
2675 {
2676 	int res;
2677 
2678 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2679 		event_warnx("%s: event has no event_base set.", __func__);
2680 		return -1;
2681 	}
2682 
2683 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2684 
2685 	res = event_del_nolock_(ev, blocking);
2686 
2687 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2688 
2689 	return (res);
2690 }
2691 
2692 int
2693 event_del(struct event *ev)
2694 {
2695 	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2696 }
2697 
2698 int
2699 event_del_block(struct event *ev)
2700 {
2701 	return event_del_(ev, EVENT_DEL_BLOCK);
2702 }
2703 
2704 int
2705 event_del_noblock(struct event *ev)
2706 {
2707 	return event_del_(ev, EVENT_DEL_NOBLOCK);
2708 }
2709 
2710 /** Helper for event_del: always called with th_base_lock held.
2711  *
2712  * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2713  * EVEN_IF_FINALIZING} values. See those for more information.
2714  */
2715 int
2716 event_del_nolock_(struct event *ev, int blocking)
2717 {
2718 	struct event_base *base;
2719 	int res = 0, notify = 0;
2720 
2721 	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2722 		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2723 
2724 	/* An event without a base has not been added */
2725 	if (ev->ev_base == NULL)
2726 		return (-1);
2727 
2728 	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2729 
2730 	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2731 		if (ev->ev_flags & EVLIST_FINALIZING) {
2732 			/* XXXX Debug */
2733 			return 0;
2734 		}
2735 	}
2736 
2737 	/* If the main thread is currently executing this event's callback,
2738 	 * and we are not the main thread, then we want to wait until the
2739 	 * callback is done before we start removing the event.  That way,
2740 	 * when this function returns, it will be safe to free the
2741 	 * user-supplied argument. */
2742 	base = ev->ev_base;
2743 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2744 	if (blocking != EVENT_DEL_NOBLOCK &&
2745 	    base->current_event == event_to_event_callback(ev) &&
2746 	    !EVBASE_IN_THREAD(base) &&
2747 	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2748 		++base->current_event_waiters;
2749 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2750 	}
2751 #endif
2752 
2753 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2754 
2755 	/* See if we are just actively executing this event in a loop */
2756 	if (ev->ev_events & EV_SIGNAL) {
2757 		if (ev->ev_ncalls && ev->ev_pncalls) {
2758 			/* Abort loop */
2759 			*ev->ev_pncalls = 0;
2760 		}
2761 	}
2762 
2763 	if (ev->ev_flags & EVLIST_TIMEOUT) {
2764 		/* NOTE: We never need to notify the main thread because of a
2765 		 * deleted timeout event: all that could happen if we don't is
2766 		 * that the dispatch loop might wake up too early.  But the
2767 		 * point of notifying the main thread _is_ to wake up the
2768 		 * dispatch loop early anyway, so we wouldn't gain anything by
2769 		 * doing it.
2770 		 */
2771 		event_queue_remove_timeout(base, ev);
2772 	}
2773 
2774 	if (ev->ev_flags & EVLIST_ACTIVE)
2775 		event_queue_remove_active(base, event_to_event_callback(ev));
2776 	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2777 		event_queue_remove_active_later(base, event_to_event_callback(ev));
2778 
2779 	if (ev->ev_flags & EVLIST_INSERTED) {
2780 		event_queue_remove_inserted(base, ev);
2781 		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2782 			res = evmap_io_del_(base, ev->ev_fd, ev);
2783 		else
2784 			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2785 		if (res == 1) {
2786 			/* evmap says we need to notify the main thread. */
2787 			notify = 1;
2788 			res = 0;
2789 		}
2790 	}
2791 
2792 	/* if we are not in the right thread, we need to wake up the loop */
2793 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2794 		evthread_notify_base(base);
2795 
2796 	event_debug_note_del_(ev);
2797 
2798 	return (res);
2799 }
2800 
2801 void
2802 event_active(struct event *ev, int res, short ncalls)
2803 {
2804 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2805 		event_warnx("%s: event has no event_base set.", __func__);
2806 		return;
2807 	}
2808 
2809 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2810 
2811 	event_debug_assert_is_setup_(ev);
2812 
2813 	event_active_nolock_(ev, res, ncalls);
2814 
2815 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2816 }
2817 
2818 
2819 void
2820 event_active_nolock_(struct event *ev, int res, short ncalls)
2821 {
2822 	struct event_base *base;
2823 
2824 	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2825 		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2826 
2827 	base = ev->ev_base;
2828 	EVENT_BASE_ASSERT_LOCKED(base);
2829 
2830 	if (ev->ev_flags & EVLIST_FINALIZING) {
2831 		/* XXXX debug */
2832 		return;
2833 	}
2834 
2835 	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2836 	default:
2837 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2838 		EVUTIL_ASSERT(0);
2839 		break;
2840 	case EVLIST_ACTIVE:
2841 		/* We get different kinds of events, add them together */
2842 		ev->ev_res |= res;
2843 		return;
2844 	case EVLIST_ACTIVE_LATER:
2845 		ev->ev_res |= res;
2846 		break;
2847 	case 0:
2848 		ev->ev_res = res;
2849 		break;
2850 	}
2851 
2852 	if (ev->ev_pri < base->event_running_priority)
2853 		base->event_continue = 1;
2854 
2855 	if (ev->ev_events & EV_SIGNAL) {
2856 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2857 		if (base->current_event == event_to_event_callback(ev) &&
2858 		    !EVBASE_IN_THREAD(base)) {
2859 			++base->current_event_waiters;
2860 			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2861 		}
2862 #endif
2863 		ev->ev_ncalls = ncalls;
2864 		ev->ev_pncalls = NULL;
2865 	}
2866 
2867 	event_callback_activate_nolock_(base, event_to_event_callback(ev));
2868 }
2869 
2870 void
2871 event_active_later_(struct event *ev, int res)
2872 {
2873 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2874 	event_active_later_nolock_(ev, res);
2875 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2876 }
2877 
2878 void
2879 event_active_later_nolock_(struct event *ev, int res)
2880 {
2881 	struct event_base *base = ev->ev_base;
2882 	EVENT_BASE_ASSERT_LOCKED(base);
2883 
2884 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2885 		/* We get different kinds of events, add them together */
2886 		ev->ev_res |= res;
2887 		return;
2888 	}
2889 
2890 	ev->ev_res = res;
2891 
2892 	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2893 }
2894 
2895 int
2896 event_callback_activate_(struct event_base *base,
2897     struct event_callback *evcb)
2898 {
2899 	int r;
2900 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2901 	r = event_callback_activate_nolock_(base, evcb);
2902 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2903 	return r;
2904 }
2905 
2906 int
2907 event_callback_activate_nolock_(struct event_base *base,
2908     struct event_callback *evcb)
2909 {
2910 	int r = 1;
2911 
2912 	if (evcb->evcb_flags & EVLIST_FINALIZING)
2913 		return 0;
2914 
2915 	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2916 	default:
2917 		EVUTIL_ASSERT(0);
2918 	case EVLIST_ACTIVE_LATER:
2919 		event_queue_remove_active_later(base, evcb);
2920 		r = 0;
2921 		break;
2922 	case EVLIST_ACTIVE:
2923 		return 0;
2924 	case 0:
2925 		break;
2926 	}
2927 
2928 	event_queue_insert_active(base, evcb);
2929 
2930 	if (EVBASE_NEED_NOTIFY(base))
2931 		evthread_notify_base(base);
2932 
2933 	return r;
2934 }
2935 
2936 void
2937 event_callback_activate_later_nolock_(struct event_base *base,
2938     struct event_callback *evcb)
2939 {
2940 	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2941 		return;
2942 
2943 	event_queue_insert_active_later(base, evcb);
2944 	if (EVBASE_NEED_NOTIFY(base))
2945 		evthread_notify_base(base);
2946 }
2947 
2948 void
2949 event_callback_init_(struct event_base *base,
2950     struct event_callback *cb)
2951 {
2952 	memset(cb, 0, sizeof(*cb));
2953 	cb->evcb_pri = base->nactivequeues - 1;
2954 }
2955 
2956 int
2957 event_callback_cancel_(struct event_base *base,
2958     struct event_callback *evcb)
2959 {
2960 	int r;
2961 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2962 	r = event_callback_cancel_nolock_(base, evcb, 0);
2963 	EVBASE_RELEASE_LOCK(base, th_base_lock);
2964 	return r;
2965 }
2966 
2967 int
2968 event_callback_cancel_nolock_(struct event_base *base,
2969     struct event_callback *evcb, int even_if_finalizing)
2970 {
2971 	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
2972 		return 0;
2973 
2974 	if (evcb->evcb_flags & EVLIST_INIT)
2975 		return event_del_nolock_(event_callback_to_event(evcb),
2976 		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
2977 
2978 	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2979 	default:
2980 	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2981 		EVUTIL_ASSERT(0);
2982 		break;
2983 	case EVLIST_ACTIVE:
2984 		/* The callback is on the active queue; just remove it. */
2985 		event_queue_remove_active(base, evcb);
2986 		return 0;
2987 	case EVLIST_ACTIVE_LATER:
2988 		event_queue_remove_active_later(base, evcb);
2989 		break;
2990 	case 0:
2991 		break;
2992 	}
2993 
2994 	return 0;
2995 }
2996 
2997 void
2998 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
2999 {
3000 	memset(cb, 0, sizeof(*cb));
3001 	cb->evcb_cb_union.evcb_selfcb = fn;
3002 	cb->evcb_arg = arg;
3003 	cb->evcb_pri = priority;
3004 	cb->evcb_closure = EV_CLOSURE_CB_SELF;
3005 }
3006 
3007 void
3008 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3009 {
3010 	cb->evcb_pri = priority;
3011 }
3012 
3013 void
3014 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3015 {
3016 	if (!base)
3017 		base = current_base;
3018 	event_callback_cancel_(base, cb);
3019 }
3020 
3021 #define MAX_DEFERREDS_QUEUED 32
3022 int
3023 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3024 {
3025 	int r = 1;
3026 	if (!base)
3027 		base = current_base;
3028 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3029 	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3030 		event_callback_activate_later_nolock_(base, cb);
3031 	} else {
3032 		++base->n_deferreds_queued;
3033 		r = event_callback_activate_nolock_(base, cb);
3034 	}
3035 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3036 	return r;
3037 }
3038 
3039 static int
3040 timeout_next(struct event_base *base, struct timeval **tv_p)
3041 {
3042 	/* Caller must hold th_base_lock */
3043 	struct timeval now;
3044 	struct event *ev;
3045 	struct timeval *tv = *tv_p;
3046 	int res = 0;
3047 
3048 	ev = min_heap_top_(&base->timeheap);
3049 
3050 	if (ev == NULL) {
3051 		/* if no time-based events are active, wait for I/O */
3052 		*tv_p = NULL;
3053 		goto out;
3054 	}
3055 
3056 	if (gettime(base, &now) == -1) {
3057 		res = -1;
3058 		goto out;
3059 	}
3060 
3061 	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3062 		evutil_timerclear(tv);
3063 		goto out;
3064 	}
3065 
3066 	evutil_timersub(&ev->ev_timeout, &now, tv);
3067 
3068 	EVUTIL_ASSERT(tv->tv_sec >= 0);
3069 	EVUTIL_ASSERT(tv->tv_usec >= 0);
3070 	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3071 
3072 out:
3073 	return (res);
3074 }
3075 
3076 /* Activate every event whose timeout has elapsed. */
3077 static void
3078 timeout_process(struct event_base *base)
3079 {
3080 	/* Caller must hold lock. */
3081 	struct timeval now;
3082 	struct event *ev;
3083 
3084 	if (min_heap_empty_(&base->timeheap)) {
3085 		return;
3086 	}
3087 
3088 	gettime(base, &now);
3089 
3090 	while ((ev = min_heap_top_(&base->timeheap))) {
3091 		if (evutil_timercmp(&ev->ev_timeout, &now, >))
3092 			break;
3093 
3094 		/* delete this event from the I/O queues */
3095 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3096 
3097 		event_debug(("timeout_process: event: %p, call %p",
3098 			 ev, ev->ev_callback));
3099 		event_active_nolock_(ev, EV_TIMEOUT, 1);
3100 	}
3101 }
3102 
3103 #if (EVLIST_INTERNAL >> 4) != 1
3104 #error "Mismatch for value of EVLIST_INTERNAL"
3105 #endif
3106 
3107 #ifndef MAX
3108 #define MAX(a,b) (((a)>(b))?(a):(b))
3109 #endif
3110 
3111 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3112 
3113 /* These are a fancy way to spell
3114      if (flags & EVLIST_INTERNAL)
3115          base->event_count--/++;
3116 */
3117 #define DECR_EVENT_COUNT(base,flags) \
3118 	((base)->event_count -= (~((flags) >> 4) & 1))
3119 #define INCR_EVENT_COUNT(base,flags) do {					\
3120 	((base)->event_count += (~((flags) >> 4) & 1));				\
3121 	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
3122 } while (0)
3123 
3124 static void
3125 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3126 {
3127 	EVENT_BASE_ASSERT_LOCKED(base);
3128 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3129 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3130 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3131 		return;
3132 	}
3133 	DECR_EVENT_COUNT(base, ev->ev_flags);
3134 	ev->ev_flags &= ~EVLIST_INSERTED;
3135 }
3136 static void
3137 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3138 {
3139 	EVENT_BASE_ASSERT_LOCKED(base);
3140 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3141 		event_errx(1, "%s: %p not on queue %x", __func__,
3142 			   evcb, EVLIST_ACTIVE);
3143 		return;
3144 	}
3145 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3146 	evcb->evcb_flags &= ~EVLIST_ACTIVE;
3147 	base->event_count_active--;
3148 
3149 	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3150 	    evcb, evcb_active_next);
3151 }
3152 static void
3153 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3154 {
3155 	EVENT_BASE_ASSERT_LOCKED(base);
3156 	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3157 		event_errx(1, "%s: %p not on queue %x", __func__,
3158 			   evcb, EVLIST_ACTIVE_LATER);
3159 		return;
3160 	}
3161 	DECR_EVENT_COUNT(base, evcb->evcb_flags);
3162 	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3163 	base->event_count_active--;
3164 
3165 	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3166 }
3167 static void
3168 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3169 {
3170 	EVENT_BASE_ASSERT_LOCKED(base);
3171 	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3172 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3173 		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3174 		return;
3175 	}
3176 	DECR_EVENT_COUNT(base, ev->ev_flags);
3177 	ev->ev_flags &= ~EVLIST_TIMEOUT;
3178 
3179 	if (is_common_timeout(&ev->ev_timeout, base)) {
3180 		struct common_timeout_list *ctl =
3181 		    get_common_timeout_list(base, &ev->ev_timeout);
3182 		TAILQ_REMOVE(&ctl->events, ev,
3183 		    ev_timeout_pos.ev_next_with_common_timeout);
3184 	} else {
3185 		min_heap_erase_(&base->timeheap, ev);
3186 	}
3187 }
3188 
3189 #ifdef USE_REINSERT_TIMEOUT
3190 /* Remove and reinsert 'ev' into the timeout queue. */
3191 static void
3192 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3193     int was_common, int is_common, int old_timeout_idx)
3194 {
3195 	struct common_timeout_list *ctl;
3196 	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3197 		event_queue_insert_timeout(base, ev);
3198 		return;
3199 	}
3200 
3201 	switch ((was_common<<1) | is_common) {
3202 	case 3: /* Changing from one common timeout to another */
3203 		ctl = base->common_timeout_queues[old_timeout_idx];
3204 		TAILQ_REMOVE(&ctl->events, ev,
3205 		    ev_timeout_pos.ev_next_with_common_timeout);
3206 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3207 		insert_common_timeout_inorder(ctl, ev);
3208 		break;
3209 	case 2: /* Was common; is no longer common */
3210 		ctl = base->common_timeout_queues[old_timeout_idx];
3211 		TAILQ_REMOVE(&ctl->events, ev,
3212 		    ev_timeout_pos.ev_next_with_common_timeout);
3213 		min_heap_push_(&base->timeheap, ev);
3214 		break;
3215 	case 1: /* Wasn't common; has become common. */
3216 		min_heap_erase_(&base->timeheap, ev);
3217 		ctl = get_common_timeout_list(base, &ev->ev_timeout);
3218 		insert_common_timeout_inorder(ctl, ev);
3219 		break;
3220 	case 0: /* was in heap; is still on heap. */
3221 		min_heap_adjust_(&base->timeheap, ev);
3222 		break;
3223 	default:
3224 		EVUTIL_ASSERT(0); /* unreachable */
3225 		break;
3226 	}
3227 }
3228 #endif
3229 
3230 /* Add 'ev' to the common timeout list 'ctl'. */
3231 static void
3232 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3233     struct event *ev)
3234 {
3235 	struct event *e;
3236 	/* By all logic, we should just be able to append 'ev' to the end of
3237 	 * ctl->events, since the timeout on each 'ev' is set to {the common
3238 	 * timeout} + {the time when we add the event}, and so the events
3239 	 * should arrive in order of their timeouts.  But just in case
3240 	 * there's some wacky threading issue going on, we do a search from
3241 	 * the end of the list to find the right insertion point.
3242 	 */
3243 	TAILQ_FOREACH_REVERSE(e, &ctl->events,
3244 	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3245 		/* This timercmp is a little sneaky, since both ev and e have
3246 		 * magic values in tv_usec.  Fortunately, they ought to have
3247 		 * the _same_ magic values in tv_usec.  Let's assert for that.
3248 		 */
3249 		EVUTIL_ASSERT(
3250 			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3251 		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3252 			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3253 			    ev_timeout_pos.ev_next_with_common_timeout);
3254 			return;
3255 		}
3256 	}
3257 	TAILQ_INSERT_HEAD(&ctl->events, ev,
3258 	    ev_timeout_pos.ev_next_with_common_timeout);
3259 }
3260 
3261 static void
3262 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3263 {
3264 	EVENT_BASE_ASSERT_LOCKED(base);
3265 
3266 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3267 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3268 		    ev, EV_SOCK_ARG(ev->ev_fd));
3269 		return;
3270 	}
3271 
3272 	INCR_EVENT_COUNT(base, ev->ev_flags);
3273 
3274 	ev->ev_flags |= EVLIST_INSERTED;
3275 }
3276 
3277 static void
3278 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3279 {
3280 	EVENT_BASE_ASSERT_LOCKED(base);
3281 
3282 	if (evcb->evcb_flags & EVLIST_ACTIVE) {
3283 		/* Double insertion is possible for active events */
3284 		return;
3285 	}
3286 
3287 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3288 
3289 	evcb->evcb_flags |= EVLIST_ACTIVE;
3290 
3291 	base->event_count_active++;
3292 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3293 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3294 	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3295 	    evcb, evcb_active_next);
3296 }
3297 
3298 static void
3299 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3300 {
3301 	EVENT_BASE_ASSERT_LOCKED(base);
3302 	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3303 		/* Double insertion is possible */
3304 		return;
3305 	}
3306 
3307 	INCR_EVENT_COUNT(base, evcb->evcb_flags);
3308 	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3309 	base->event_count_active++;
3310 	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3311 	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3312 	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3313 }
3314 
3315 static void
3316 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3317 {
3318 	EVENT_BASE_ASSERT_LOCKED(base);
3319 
3320 	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3321 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3322 		    ev, EV_SOCK_ARG(ev->ev_fd));
3323 		return;
3324 	}
3325 
3326 	INCR_EVENT_COUNT(base, ev->ev_flags);
3327 
3328 	ev->ev_flags |= EVLIST_TIMEOUT;
3329 
3330 	if (is_common_timeout(&ev->ev_timeout, base)) {
3331 		struct common_timeout_list *ctl =
3332 		    get_common_timeout_list(base, &ev->ev_timeout);
3333 		insert_common_timeout_inorder(ctl, ev);
3334 	} else {
3335 		min_heap_push_(&base->timeheap, ev);
3336 	}
3337 }
3338 
3339 static void
3340 event_queue_make_later_events_active(struct event_base *base)
3341 {
3342 	struct event_callback *evcb;
3343 	EVENT_BASE_ASSERT_LOCKED(base);
3344 
3345 	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3346 		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3347 		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3348 		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3349 		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3350 		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3351 	}
3352 }
3353 
3354 /* Functions for debugging */
3355 
3356 const char *
3357 event_get_version(void)
3358 {
3359 	return (EVENT__VERSION);
3360 }
3361 
3362 ev_uint32_t
3363 event_get_version_number(void)
3364 {
3365 	return (EVENT__NUMERIC_VERSION);
3366 }
3367 
3368 /*
3369  * No thread-safe interface needed - the information should be the same
3370  * for all threads.
3371  */
3372 
3373 const char *
3374 event_get_method(void)
3375 {
3376 	return (current_base->evsel->name);
3377 }
3378 
3379 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3380 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3381 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3382 static void (*mm_free_fn_)(void *p) = NULL;
3383 
3384 void *
3385 event_mm_malloc_(size_t sz)
3386 {
3387 	if (sz == 0)
3388 		return NULL;
3389 
3390 	if (mm_malloc_fn_)
3391 		return mm_malloc_fn_(sz);
3392 	else
3393 		return malloc(sz);
3394 }
3395 
3396 void *
3397 event_mm_calloc_(size_t count, size_t size)
3398 {
3399 	if (count == 0 || size == 0)
3400 		return NULL;
3401 
3402 	if (mm_malloc_fn_) {
3403 		size_t sz = count * size;
3404 		void *p = NULL;
3405 		if (count > EV_SIZE_MAX / size)
3406 			goto error;
3407 		p = mm_malloc_fn_(sz);
3408 		if (p)
3409 			return memset(p, 0, sz);
3410 	} else {
3411 		void *p = calloc(count, size);
3412 #ifdef _WIN32
3413 		/* Windows calloc doesn't reliably set ENOMEM */
3414 		if (p == NULL)
3415 			goto error;
3416 #endif
3417 		return p;
3418 	}
3419 
3420 error:
3421 	errno = ENOMEM;
3422 	return NULL;
3423 }
3424 
3425 char *
3426 event_mm_strdup_(const char *str)
3427 {
3428 	if (!str) {
3429 		errno = EINVAL;
3430 		return NULL;
3431 	}
3432 
3433 	if (mm_malloc_fn_) {
3434 		size_t ln = strlen(str);
3435 		void *p = NULL;
3436 		if (ln == EV_SIZE_MAX)
3437 			goto error;
3438 		p = mm_malloc_fn_(ln+1);
3439 		if (p)
3440 			return memcpy(p, str, ln+1);
3441 	} else
3442 #ifdef _WIN32
3443 		return _strdup(str);
3444 #else
3445 		return strdup(str);
3446 #endif
3447 
3448 error:
3449 	errno = ENOMEM;
3450 	return NULL;
3451 }
3452 
3453 void *
3454 event_mm_realloc_(void *ptr, size_t sz)
3455 {
3456 	if (mm_realloc_fn_)
3457 		return mm_realloc_fn_(ptr, sz);
3458 	else
3459 		return realloc(ptr, sz);
3460 }
3461 
3462 void
3463 event_mm_free_(void *ptr)
3464 {
3465 	if (mm_free_fn_)
3466 		mm_free_fn_(ptr);
3467 	else
3468 		free(ptr);
3469 }
3470 
3471 void
3472 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3473 			void *(*realloc_fn)(void *ptr, size_t sz),
3474 			void (*free_fn)(void *ptr))
3475 {
3476 	mm_malloc_fn_ = malloc_fn;
3477 	mm_realloc_fn_ = realloc_fn;
3478 	mm_free_fn_ = free_fn;
3479 }
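
/*
 * Illustrative sketch (assumed usage): routing every libevent allocation
 * through custom wrappers (for accounting, pools, etc.).  The wrappers are
 * placeholders and must be installed before libevent allocates anything.
 *
 *	static void *my_malloc(size_t sz)           { return malloc(sz); }
 *	static void *my_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *	static void  my_free(void *p)               { free(p); }
 *	...
 *	event_set_mem_functions(my_malloc, my_realloc, my_free);
 */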
3480 #endif
3481 
3482 #ifdef EVENT__HAVE_EVENTFD
3483 static void
3484 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3485 {
3486 	ev_uint64_t msg;
3487 	ev_ssize_t r;
3488 	struct event_base *base = arg;
3489 
3490 	r = read(fd, (void*) &msg, sizeof(msg));
3491 	if (r<0 && errno != EAGAIN) {
3492 		event_sock_warn(fd, "Error reading from eventfd");
3493 	}
3494 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3495 	base->is_notify_pending = 0;
3496 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3497 }
3498 #endif
3499 
3500 static void
3501 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3502 {
3503 	unsigned char buf[1024];
3504 	struct event_base *base = arg;
3505 #ifdef _WIN32
3506 	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3507 		;
3508 #else
3509 	while (read(fd, (char*)buf, sizeof(buf)) > 0)
3510 		;
3511 #endif
3512 
3513 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3514 	base->is_notify_pending = 0;
3515 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3516 }
3517 
3518 int
3519 evthread_make_base_notifiable(struct event_base *base)
3520 {
3521 	int r;
3522 	if (!base)
3523 		return -1;
3524 
3525 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3526 	r = evthread_make_base_notifiable_nolock_(base);
3527 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3528 	return r;
3529 }
3530 
3531 static int
3532 evthread_make_base_notifiable_nolock_(struct event_base *base)
3533 {
3534 	void (*cb)(evutil_socket_t, short, void *);
3535 	int (*notify)(struct event_base *);
3536 
3537 	if (base->th_notify_fn != NULL) {
3538 		/* The base is already notifiable: we're doing fine. */
3539 		return 0;
3540 	}
3541 
3542 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3543 	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3544 		base->th_notify_fn = event_kq_notify_base_;
3545 		/* No need to add an event here; the backend can wake
3546 		 * itself up just fine. */
3547 		return 0;
3548 	}
3549 #endif
3550 
3551 #ifdef EVENT__HAVE_EVENTFD
3552 	base->th_notify_fd[0] = evutil_eventfd_(0,
3553 	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3554 	if (base->th_notify_fd[0] >= 0) {
3555 		base->th_notify_fd[1] = -1;
3556 		notify = evthread_notify_base_eventfd;
3557 		cb = evthread_notify_drain_eventfd;
3558 	} else
3559 #endif
3560 	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3561 		notify = evthread_notify_base_default;
3562 		cb = evthread_notify_drain_default;
3563 	} else {
3564 		return -1;
3565 	}
3566 
3567 	base->th_notify_fn = notify;
3568 
3569 	/* prepare an event that we can use for wakeup */
3570 	event_assign(&base->th_notify, base, base->th_notify_fd[0],
3571 				 EV_READ|EV_PERSIST, cb, base);
3572 
3573 	/* we need to mark this as an internal event */
3574 	base->th_notify.ev_flags |= EVLIST_INTERNAL;
3575 	event_priority_set(&base->th_notify, 0);
3576 
3577 	return event_add_nolock_(&base->th_notify, NULL, 0);
3578 }
3579 
3580 int
3581 event_base_foreach_event_nolock_(struct event_base *base,
3582     event_base_foreach_event_cb fn, void *arg)
3583 {
3584 	int r, i;
3585 	unsigned u;
3586 	struct event *ev;
3587 
3588 	/* Start out with all the EVLIST_INSERTED events. */
3589 	if ((r = evmap_foreach_event_(base, fn, arg)))
3590 		return r;
3591 
3592 	/* Okay, now we deal with those events that have timeouts and are in
3593 	 * the min-heap. */
3594 	for (u = 0; u < base->timeheap.n; ++u) {
3595 		ev = base->timeheap.p[u];
3596 		if (ev->ev_flags & EVLIST_INSERTED) {
3597 			/* we already processed this one */
3598 			continue;
3599 		}
3600 		if ((r = fn(base, ev, arg)))
3601 			return r;
3602 	}
3603 
3604 	/* Now for the events in one of the common-timeout queues rather
3605 	 * than the min-heap. */
3606 	for (i = 0; i < base->n_common_timeouts; ++i) {
3607 		struct common_timeout_list *ctl =
3608 		    base->common_timeout_queues[i];
3609 		TAILQ_FOREACH(ev, &ctl->events,
3610 		    ev_timeout_pos.ev_next_with_common_timeout) {
3611 			if (ev->ev_flags & EVLIST_INSERTED) {
3612 				/* we already processed this one */
3613 				continue;
3614 			}
3615 			if ((r = fn(base, ev, arg)))
3616 				return r;
3617 		}
3618 	}
3619 
3620 	/* Finally, we deal with all the active events that we haven't touched
3621 	 * yet. */
3622 	for (i = 0; i < base->nactivequeues; ++i) {
3623 		struct event_callback *evcb;
3624 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3625 			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3626 				/* This isn't an event (EVLIST_INIT clear), or
3627 				 * we already processed it (inserted or
3628 				 * timeout set). */
3629 				continue;
3630 			}
3631 			ev = event_callback_to_event(evcb);
3632 			if ((r = fn(base, ev, arg)))
3633 				return r;
3634 		}
3635 	}
3636 
3637 	return 0;
3638 }
3639 
3640 /* Helper for event_base_dump_events: called on each event in the event base;
3641  * dumps only the inserted events. */
3642 static int
3643 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3644 {
3645 	FILE *output = arg;
3646 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3647 	    "sig" : "fd ";
3648 
3649 	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3650 		return 0;
3651 
3652 	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
3653 	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3654 	    (e->ev_events&EV_READ)?" Read":"",
3655 	    (e->ev_events&EV_WRITE)?" Write":"",
3656 	    (e->ev_events&EV_CLOSED)?" EOF":"",
3657 	    (e->ev_events&EV_SIGNAL)?" Signal":"",
3658 	    (e->ev_events&EV_PERSIST)?" Persist":"",
3659 	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3660 	if (e->ev_flags & EVLIST_TIMEOUT) {
3661 		struct timeval tv;
3662 		tv.tv_sec = e->ev_timeout.tv_sec;
3663 		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3664 		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3665 		fprintf(output, " Timeout=%ld.%06d",
3666 		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3667 	}
3668 	fputc('\n', output);
3669 
3670 	return 0;
3671 }
3672 
3673 /* Helper for event_base_dump_events: called on each event in the event base;
3674  * dumps only the active events. */
3675 static int
3676 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3677 {
3678 	FILE *output = arg;
3679 	const char *gloss = (e->ev_events & EV_SIGNAL) ?
3680 	    "sig" : "fd ";
3681 
3682 	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3683 		return 0;
3684 
3685 	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3686 	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3687 	    (e->ev_res&EV_READ)?" Read":"",
3688 	    (e->ev_res&EV_WRITE)?" Write":"",
3689 	    (e->ev_res&EV_CLOSED)?" EOF":"",
3690 	    (e->ev_res&EV_SIGNAL)?" Signal":"",
3691 	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3692 	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3693 	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3694 
3695 	return 0;
3696 }
3697 
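/*
 * Public wrapper around event_base_foreach_event_nolock_(): grabs the base
 * lock, runs "fn" on every event known to "base", and stops early if the
 * callback returns nonzero.  The callback should not add, delete, or
 * activate events on the base while the iteration is in progress.
 *
 * A minimal usage sketch (the callback name and the counting logic are
 * illustrative only):
 *
 *	static int
 *	count_event_cb(const struct event_base *b, const struct event *ev,
 *	    void *arg)
 *	{
 *		int *np = arg;
 *		(void)b; (void)ev;
 *		++*np;
 *		return 0;	(returning nonzero would stop the loop)
 *	}
 *
 *	int n = 0;
 *	event_base_foreach_event(base, count_event_cb, &n);
 */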
3698 int
3699 event_base_foreach_event(struct event_base *base,
3700     event_base_foreach_event_cb fn, void *arg)
3701 {
3702 	int r;
3703 	if ((!fn) || (!base)) {
3704 		return -1;
3705 	}
3706 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3707 	r = event_base_foreach_event_nolock_(base, fn, arg);
3708 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3709 	return r;
3710 }
3711 
3712 
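/*
 * Write a human-readable snapshot of the base's events to "output": first
 * the inserted/pending events, then the currently active ones.  The base
 * lock is held for the whole dump, so the two lists are consistent with
 * each other.
 *
 * Typical use is ad-hoc debugging, e.g.:
 *
 *	event_base_dump_events(base, stderr);
 */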
3713 void
3714 event_base_dump_events(struct event_base *base, FILE *output)
3715 {
3716 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3717 	fprintf(output, "Inserted events:\n");
3718 	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3719 
3720 	fprintf(output, "Active events:\n");
3721 	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3722 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3723 }
3724 
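/*
 * Activate every event registered on "fd" whose interest overlaps the
 * EV_READ|EV_WRITE|EV_CLOSED bits in "events", as if the backend had
 * reported the fd ready.  Other bits in "events" are ignored.
 */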
3725 void
3726 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3727 {
3728 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3729 	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3730 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3731 }
3732 
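/*
 * Activate the events registered for signal "sig", as if the signal had
 * been delivered once.
 */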
3733 void
3734 event_base_active_by_signal(struct event_base *base, int sig)
3735 {
3736 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3737 	evmap_signal_active_(base, sig, 1);
3738 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3739 }
3740 
3741 
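/*
 * Count one more "virtual" event against the base.  Virtual events have no
 * fd or signal; internal code (the IOCP layer, for example) uses them so
 * that the loop does not exit while such work is still outstanding.
 */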
3742 void
3743 event_base_add_virtual_(struct event_base *base)
3744 {
3745 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3746 	base->virtual_event_count++;
3747 	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3748 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3749 }
3750 
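/*
 * Drop one virtual event.  If that was the last one, wake the loop (when a
 * notification is needed) so it can notice that it may now be able to exit.
 */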
3751 void
3752 event_base_del_virtual_(struct event_base *base)
3753 {
3754 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3755 	EVUTIL_ASSERT(base->virtual_event_count > 0);
3756 	base->virtual_event_count--;
3757 	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3758 		evthread_notify_base(base);
3759 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3760 }
3761 
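/*
 * Teardown helper for libevent_global_shutdown(): release the debug-map
 * lock if thread support and debug mode are both compiled in, and tell the
 * threading layer to stop lock debugging.
 */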
3762 static void
3763 event_free_debug_globals_locks(void)
3764 {
3765 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3766 #ifndef EVENT__DISABLE_DEBUG_MODE
3767 	if (event_debug_map_lock_ != NULL) {
3768 		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3769 		event_debug_map_lock_ = NULL;
3770 		evthreadimpl_disable_lock_debugging_();
3771 	}
3772 #endif /* EVENT__DISABLE_DEBUG_MODE */
3773 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3774 	return;
3775 }
3776 
3777 static void
3778 event_free_debug_globals(void)
3779 {
3780 	event_free_debug_globals_locks();
3781 }
3782 
3783 static void
3784 event_free_evsig_globals(void)
3785 {
3786 	evsig_free_globals_();
3787 }
3788 
3789 static void
3790 event_free_evutil_globals(void)
3791 {
3792 	evutil_free_globals_();
3793 }
3794 
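/* Release all global state owned by the library: debug locks, signal
 * handling globals, and evutil globals. */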
3795 static void
3796 event_free_globals(void)
3797 {
3798 	event_free_debug_globals();
3799 	event_free_evsig_globals();
3800 	event_free_evutil_globals();
3801 }
3802 
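/*
 * Free library-wide state.  This is intended to be called once, at process
 * shutdown, after every event_base and event has been freed; a typical
 * teardown sketch:
 *
 *	event_base_free(base);
 *	libevent_global_shutdown();
 */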
3803 void
3804 libevent_global_shutdown(void)
3805 {
3806 	event_disable_debug_mode();
3807 	event_free_globals();
3808 }
3809 
3810 #ifndef EVENT__DISABLE_THREAD_SUPPORT
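/*
 * Set up the global locks used by the debug map, the signal code, evutil,
 * and the secure RNG.  Returns 0 on success, -1 if any subsystem fails.
 */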
3811 int
3812 event_global_setup_locks_(const int enable_locks)
3813 {
3814 #ifndef EVENT__DISABLE_DEBUG_MODE
3815 	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3816 #endif
3817 	if (evsig_global_setup_locks_(enable_locks) < 0)
3818 		return -1;
3819 	if (evutil_global_setup_locks_(enable_locks) < 0)
3820 		return -1;
3821 	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3822 		return -1;
3823 	return 0;
3824 }
3825 #endif
3826 
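/* Locking wrapper around event_base_assert_ok_nolock_(). */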
3827 void
3828 event_base_assert_ok_(struct event_base *base)
3829 {
3830 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3831 	event_base_assert_ok_nolock_(base);
3832 	EVBASE_RELEASE_LOCK(base, th_base_lock);
3833 }
3834 
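/*
 * Check the base's internal invariants, asserting on any violation: the
 * per-fd/per-signal maps, the min-heap ordering of timeouts, the sorted
 * common-timeout queues, and that the number of callbacks in the active
 * and active-later queues matches event_count_active.  The caller must
 * already hold the base lock.
 */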
3835 void
3836 event_base_assert_ok_nolock_(struct event_base *base)
3837 {
3838 	int i;
3839 	int count;
3840 
3841 	/* First do checks on the per-fd and per-signal lists */
3842 	evmap_check_integrity_(base);
3843 
3844 	/* Check the heap property */
3845 	for (i = 1; i < (int)base->timeheap.n; ++i) {
3846 		int parent = (i - 1) / 2;
3847 		struct event *ev, *p_ev;
3848 		ev = base->timeheap.p[i];
3849 		p_ev = base->timeheap.p[parent];
3850 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3851 		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3852 		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3853 	}
3854 
3855 	/* Check that the common timeouts are fine */
3856 	for (i = 0; i < base->n_common_timeouts; ++i) {
3857 		struct common_timeout_list *ctl = base->common_timeout_queues[i];
3858 		struct event *last=NULL, *ev;
3859 
3860 		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3861 
3862 		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3863 			if (last)
3864 				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3865 			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3866 			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3867 			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3868 			last = ev;
3869 		}
3870 	}
3871 
3872 	/* Check the active queues. */
3873 	count = 0;
3874 	for (i = 0; i < base->nactivequeues; ++i) {
3875 		struct event_callback *evcb;
3876 		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
3877 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3878 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
3879 			EVUTIL_ASSERT(evcb->evcb_pri == i);
3880 			++count;
3881 		}
3882 	}
3883 
3884 	{
3885 		struct event_callback *evcb;
3886 		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
3887 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
3888 			++count;
3889 		}
3890 	}
3891 	EVUTIL_ASSERT(count == base->event_count_active);
3892 }
3893